hip_filename (stringlengths 5-84) | hip_content (stringlengths 79-9.69M) | cuda_filename (stringlengths 4-83) | cuda_content (stringlengths 19-9.69M) |
---|---|---|---|
ae0cab5d74c3b4b56aaf470108e2f084359fb13a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
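// Integer ceiling division, used below to size the kernel launch grids.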
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
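// hypterm_1: radius-4 central differences along the x (unit-stride) and y
// directions; each thread updates one interior point of flux_0..flux_3.
// The weights 0.8, 0.2, 0.038, 0.0035 appear to be rounded eighth-order
// central-difference coefficients (4/5, 1/5, 4/105, 1/280).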
__global__ void hypterm_1 (double * __restrict__ flux_0, double * __restrict__ flux_1, double * __restrict__ flux_2, double * __restrict__ flux_3, double * __restrict__ flux_4, double * __restrict__ cons_1, double * __restrict__ cons_2, double * __restrict__ cons_3, double * __restrict__ cons_4, double * __restrict__ q_1, double * __restrict__ q_2, double * __restrict__ q_3, double * __restrict__ q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_0[k*M*N+j*N+i] = -((0.8*(cons_1[k*M*N+j*N+i+1]-cons_1[k*M*N+j*N+i-1])-0.2*(cons_1[k*M*N+j*N+i+2]-cons_1[k*M*N+j*N+i-2])+0.038*(cons_1[k*M*N+j*N+i+3]-cons_1[k*M*N+j*N+i-3])-0.0035*(cons_1[k*M*N+j*N+i+4]-cons_1[k*M*N+j*N+i-4]))*dxinv0);
flux_1[k*M*N+j*N+i] = -((0.8*(cons_1[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_1[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1]+(q_4[k*M*N+j*N+i+1]-q_4[k*M*N+j*N+i-1]))-0.2*(cons_1[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_1[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2]+(q_4[k*M*N+j*N+i+2]-q_4[k*M*N+j*N+i-2]))+0.038*(cons_1[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_1[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3]+(q_4[k*M*N+j*N+i+3]-q_4[k*M*N+j*N+i-3]))-0.0035*(cons_1[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_1[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]+(q_4[k*M*N+j*N+i+4]-q_4[k*M*N+j*N+i-4])))*dxinv0);
flux_2[k*M*N+j*N+i] = -((0.8*(cons_2[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_2[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1])-0.2*(cons_2[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_2[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2])+0.038*(cons_2[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_2[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3])-0.0035*(cons_2[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_2[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]))*dxinv0);
flux_3[k*M*N+j*N+i] = -((0.8*(cons_3[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_3[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1])-0.2*(cons_3[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_3[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2])+0.038*(cons_3[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_3[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3])-0.0035*(cons_3[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_3[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]))*dxinv0);
flux_0[k*M*N+j*N+i] -= (0.8*(cons_2[k*M*N+(j+1)*N+i]-cons_2[k*M*N+(j-1)*N+i])-0.2*(cons_2[k*M*N+(j+2)*N+i]-cons_2[k*M*N+(j-2)*N+i])+0.038*(cons_2[k*M*N+(j+3)*N+i]-cons_2[k*M*N+(j-3)*N+i])-0.0035*(cons_2[k*M*N+(j+4)*N+i]-cons_2[k*M*N+(j-4)*N+i]))*dxinv1;
flux_1[k*M*N+j*N+i] -= (0.8*(cons_1[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_1[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i])-0.2*(cons_1[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_1[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i])+0.038*(cons_1[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_1[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i])-0.0035*(cons_1[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_1[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]))*dxinv1;
flux_2[k*M*N+j*N+i] -= (0.8*(cons_2[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_2[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i]+(q_4[k*M*N+(j+1)*N+i]-q_4[k*M*N+(j-1)*N+i]))-0.2*(cons_2[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_2[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i]+(q_4[k*M*N+(j+2)*N+i]-q_4[k*M*N+(j-2)*N+i]))+0.038*(cons_2[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_2[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i]+(q_4[k*M*N+(j+3)*N+i]-q_4[k*M*N+(j-3)*N+i]))-0.0035*(cons_2[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_2[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]+(q_4[k*M*N+(j+4)*N+i]-q_4[k*M*N+(j-4)*N+i])))*dxinv1;
flux_3[k*M*N+j*N+i] -= (0.8*(cons_3[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_3[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i])-0.2*(cons_3[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_3[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i])+0.038*(cons_3[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_3[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i])-0.0035*(cons_3[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_3[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]))*dxinv1;
}
}
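// hypterm_2: adds the z-direction contribution. Each thread handles 4
// consecutive k-planes, matching the 4x-coarser z grid used at the launch site.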
__global__ void hypterm_2 (double * __restrict__ flux_0, double * __restrict__ flux_1, double * __restrict__ flux_2, double * __restrict__ flux_3, double * __restrict__ flux_4, double * __restrict__ cons_1, double * __restrict__ cons_2, double * __restrict__ cons_3, double * __restrict__ cons_4, double * __restrict__ q_1, double * __restrict__ q_2, double * __restrict__ q_3, double * __restrict__ q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + 4*(int)(threadIdx.z);
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_0[k*M*N+j*N+i] -= (0.8*(cons_3[(k+1)*M*N+j*N+i]-cons_3[(k-1)*M*N+j*N+i])-0.2*(cons_3[(k+2)*M*N+j*N+i]-cons_3[(k-2)*M*N+j*N+i])+0.038*(cons_3[(k+3)*M*N+j*N+i]-cons_3[(k-3)*M*N+j*N+i])-0.0035*(cons_3[(k+4)*M*N+j*N+i]-cons_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+1)*M*N+j*N+i] -= (0.8*(cons_3[((k+1)+1)*M*N+j*N+i]-cons_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_3[((k+1)+2)*M*N+j*N+i]-cons_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_3[((k+1)+3)*M*N+j*N+i]-cons_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+1)+4)*M*N+j*N+i]-cons_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+2)*M*N+j*N+i] -= (0.8*(cons_3[((k+2)+1)*M*N+j*N+i]-cons_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_3[((k+2)+2)*M*N+j*N+i]-cons_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_3[((k+2)+3)*M*N+j*N+i]-cons_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+2)+4)*M*N+j*N+i]-cons_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+3)*M*N+j*N+i] -= (0.8*(cons_3[((k+3)+1)*M*N+j*N+i]-cons_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_3[((k+3)+2)*M*N+j*N+i]-cons_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_3[((k+3)+3)*M*N+j*N+i]-cons_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+3)+4)*M*N+j*N+i]-cons_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
flux_1[k*M*N+j*N+i] -= (0.8*(cons_1[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_1[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i])-0.2*(cons_1[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_1[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i])+0.038*(cons_1[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_1[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i])-0.0035*(cons_1[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_1[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+1)*M*N+j*N+i] -= (0.8*(cons_1[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_1[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_1[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_1[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_1[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_1[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_1[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+2)*M*N+j*N+i] -= (0.8*(cons_1[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_1[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_1[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_1[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_1[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_1[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_1[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+3)*M*N+j*N+i] -= (0.8*(cons_1[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_1[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_1[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_1[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_1[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_1[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_1[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
flux_2[k*M*N+j*N+i] -= (0.8*(cons_2[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_2[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i])-0.2*(cons_2[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_2[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i])+0.038*(cons_2[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_2[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i])-0.0035*(cons_2[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_2[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+1)*M*N+j*N+i] -= (0.8*(cons_2[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_2[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_2[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_2[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_2[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_2[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_2[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+2)*M*N+j*N+i] -= (0.8*(cons_2[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_2[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_2[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_2[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_2[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_2[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_2[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+3)*M*N+j*N+i] -= (0.8*(cons_2[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_2[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_2[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_2[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_2[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_2[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_2[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
flux_3[k*M*N+j*N+i] -= (0.8*(cons_3[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_3[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i]+(q_4[(k+1)*M*N+j*N+i]-q_4[(k-1)*M*N+j*N+i]))-0.2*(cons_3[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_3[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i]+(q_4[(k+2)*M*N+j*N+i]-q_4[(k-2)*M*N+j*N+i]))+0.038*(cons_3[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_3[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i]+(q_4[(k+3)*M*N+j*N+i]-q_4[(k-3)*M*N+j*N+i]))-0.0035*(cons_3[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_3[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]+(q_4[(k+4)*M*N+j*N+i]-q_4[(k-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+1)*M*N+j*N+i] -= (0.8*(cons_3[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_3[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i]+(q_4[((k+1)+1)*M*N+j*N+i]-q_4[((k+1)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_3[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i]+(q_4[((k+1)+2)*M*N+j*N+i]-q_4[((k+1)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_3[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i]+(q_4[((k+1)+3)*M*N+j*N+i]-q_4[((k+1)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_3[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]+(q_4[((k+1)+4)*M*N+j*N+i]-q_4[((k+1)-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+2)*M*N+j*N+i] -= (0.8*(cons_3[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_3[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i]+(q_4[((k+2)+1)*M*N+j*N+i]-q_4[((k+2)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_3[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i]+(q_4[((k+2)+2)*M*N+j*N+i]-q_4[((k+2)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_3[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i]+(q_4[((k+2)+3)*M*N+j*N+i]-q_4[((k+2)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_3[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]+(q_4[((k+2)+4)*M*N+j*N+i]-q_4[((k+2)-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+3)*M*N+j*N+i] -= (0.8*(cons_3[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_3[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i]+(q_4[((k+3)+1)*M*N+j*N+i]-q_4[((k+3)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_3[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i]+(q_4[((k+3)+2)*M*N+j*N+i]-q_4[((k+3)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_3[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i]+(q_4[((k+3)+3)*M*N+j*N+i]-q_4[((k+3)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_3[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]+(q_4[((k+3)+4)*M*N+j*N+i]-q_4[((k+3)-4)*M*N+j*N+i])))*dxinv2;
}
}
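// hypterm_3: the energy-flux (flux_4) update, unrolled over 2 consecutive
// k-planes through scalar temporaries. The casts below hard-code a
// 308x308x308 grid, so this kernel assumes L = M = N = 308.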
__global__ void hypterm_3 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + 2*(int)(threadIdx.z);
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_5_;
double _t_1_;
double _t_2_;
double _t_3_;
double _t_4_;
double _t_0_;
double flux_4kc0jc0ic0 = 0;
double _t_11_;
double _t_7_;
double _t_10_;
double _t_8_;
double _t_9_;
double _t_6_;
double flux_4kp1jc0ic0 = 0;
double _t_17_;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_16_;
double _t_12_;
double _t_23_;
double _t_19_;
double _t_20_;
double _t_21_;
double _t_22_;
double _t_18_;
double _t_29_;
double _t_34_;
double _t_25_;
double _t_27_;
double _t_31_;
double _t_32_;
double _t_26_;
double _t_33_;
double _t_28_;
double _t_35_;
double _t_24_;
double _t_30_;
_t_5_ = -cons_4[k][j][i-4] * q_1[k][j][i-4];
_t_5_ -= q_4[k][j][i-4] * q_1[k][j][i-4];
_t_5_ += cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_5_ += q_4[k][j][i+4] * q_1[k][j][i+4];
_t_1_ = -0.0035 * _t_5_;
_t_2_ = -cons_4[k][j][i-1] * q_1[k][j][i-1];
_t_2_ -= q_4[k][j][i-1] * q_1[k][j][i-1];
_t_2_ += cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_2_ += q_4[k][j][i+1] * q_1[k][j][i+1];
_t_1_ += 0.8 * _t_2_;
_t_3_ = -cons_4[k][j][i-2] * q_1[k][j][i-2];
_t_3_ -= q_4[k][j][i-2] * q_1[k][j][i-2];
_t_3_ += cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_3_ += q_4[k][j][i+2] * q_1[k][j][i+2];
_t_1_ -= 0.2 * _t_3_;
_t_4_ = -cons_4[k][j][i-3] * q_1[k][j][i-3];
_t_4_ -= q_4[k][j][i-3] * q_1[k][j][i-3];
_t_4_ += cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_4_ += q_4[k][j][i+3] * q_1[k][j][i+3];
_t_1_ += 0.038 * _t_4_;
_t_0_ = _t_1_ * dxinv0;
flux_4kc0jc0ic0 -= _t_0_;
_t_11_ = -cons_4[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_11_ -= q_4[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_11_ += cons_4[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_11_ += q_4[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_7_ = -0.0035 * _t_11_;
_t_10_ = -cons_4[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_10_ -= q_4[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_10_ += cons_4[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_10_ += q_4[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_7_ += 0.038 * _t_10_;
_t_8_ = -cons_4[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_8_ -= q_4[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_8_ += cons_4[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_8_ += q_4[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_7_ += 0.8 * _t_8_;
_t_9_ = -cons_4[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_9_ -= q_4[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_9_ += cons_4[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_9_ += q_4[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_7_ -= 0.2 * _t_9_;
_t_6_ = _t_7_ * dxinv0;
flux_4kp1jc0ic0 -= _t_6_;
_t_17_ = -cons_4[k][j-4][i] * q_2[k][j-4][i];
_t_17_ -= q_4[k][j-4][i] * q_2[k][j-4][i];
_t_17_ += cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_17_ += q_4[k][j+4][i] * q_2[k][j+4][i];
_t_13_ = -0.0035 * _t_17_;
_t_14_ = -cons_4[k][j-1][i] * q_2[k][j-1][i];
_t_14_ -= q_4[k][j-1][i] * q_2[k][j-1][i];
_t_14_ += cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_14_ += q_4[k][j+1][i] * q_2[k][j+1][i];
_t_13_ += 0.8 * _t_14_;
_t_15_ = -cons_4[k][j-2][i] * q_2[k][j-2][i];
_t_15_ -= q_4[k][j-2][i] * q_2[k][j-2][i];
_t_15_ += cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_15_ += q_4[k][j+2][i] * q_2[k][j+2][i];
_t_13_ -= 0.2 * _t_15_;
_t_16_ = -cons_4[k][j-3][i] * q_2[k][j-3][i];
_t_16_ -= q_4[k][j-3][i] * q_2[k][j-3][i];
_t_16_ += cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_16_ += q_4[k][j+3][i] * q_2[k][j+3][i];
_t_13_ += 0.038 * _t_16_;
_t_12_ = _t_13_ * dxinv1;
flux_4kc0jc0ic0 -= _t_12_;
_t_23_ = -cons_4[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_23_ -= q_4[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_23_ += cons_4[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_23_ += q_4[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_19_ = -0.0035 * _t_23_;
_t_20_ = -cons_4[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_20_ -= q_4[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_20_ += cons_4[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_20_ += q_4[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_19_ += 0.8 * _t_20_;
_t_21_ = -cons_4[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_21_ -= q_4[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_21_ += cons_4[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_21_ += q_4[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_19_ -= 0.2 * _t_21_;
_t_22_ = -cons_4[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_22_ -= q_4[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_22_ += cons_4[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_22_ += q_4[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_19_ += 0.038 * _t_22_;
_t_18_ = _t_19_ * dxinv1;
flux_4kp1jc0ic0 -= _t_18_;
_t_29_ = -cons_4[k-4][j][i] * q_3[k-4][j][i];
_t_29_ -= q_4[k-4][j][i] * q_3[k-4][j][i];
_t_29_ += cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_34_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_29_ += q_4[k+4][j][i] * q_3[k+4][j][i];
_t_34_ += q_4[k+4][j][i] * q_3[k+4][j][i];
_t_25_ = -0.0035 * _t_29_;
_t_34_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
_t_27_ = -cons_4[k-2][j][i] * q_3[k-2][j][i];
_t_27_ -= q_4[k-2][j][i] * q_3[k-2][j][i];
_t_34_ -= q_4[k-2][j][i] * q_3[k-2][j][i];
_t_31_ = 0.038 * _t_34_;
_t_27_ += cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_32_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_27_ += q_4[k+2][j][i] * q_3[k+2][j][i];
_t_25_ -= 0.2 * _t_27_;
_t_32_ += q_4[k+2][j][i] * q_3[k+2][j][i];
_t_32_ -= cons_4[k][j][i] * q_3[k][j][i];
_t_32_ -= q_4[k][j][i] * q_3[k][j][i];
_t_31_ += 0.8 * _t_32_;
_t_26_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_26_ += q_4[k+1][j][i] * q_3[k+1][j][i];
_t_26_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
_t_33_ = -cons_4[k-1][j][i] * q_3[k-1][j][i];
_t_26_ -= q_4[k-1][j][i] * q_3[k-1][j][i];
_t_25_ += 0.8 * _t_26_;
_t_33_ -= q_4[k-1][j][i] * q_3[k-1][j][i];
_t_33_ += cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_28_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_28_ += q_4[k+3][j][i] * q_3[k+3][j][i];
_t_33_ += q_4[k+3][j][i] * q_3[k+3][j][i];
_t_31_ -= 0.2 * _t_33_;
_t_28_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
_t_35_ = -cons_4[k-3][j][i] * q_3[k-3][j][i];
_t_28_ -= q_4[k-3][j][i] * q_3[k-3][j][i];
_t_25_ += 0.038 * _t_28_;
_t_35_ -= q_4[k-3][j][i] * q_3[k-3][j][i];
_t_35_ += cons_4[k+5][j][i] * q_3[k+5][j][i];
_t_35_ += q_4[k+5][j][i] * q_3[k+5][j][i];
_t_31_ -= 0.0035 * _t_35_;
_t_24_ = _t_25_ * dxinv2;
flux_4kc0jc0ic0 -= _t_24_;
flux_4[k][j][i] = flux_4kc0jc0ic0;
_t_30_ = _t_31_ * dxinv2;
flux_4kp1jc0ic0 -= _t_30_;
flux_4[k+1][j][i] = flux_4kp1jc0ic0;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
hipMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_1;
hipMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_2;
hipMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_3;
hipMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *flux_4;
hipMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_1;
hipMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_2;
hipMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_3;
hipMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *cons_4;
hipMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_1;
hipMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_2;
hipMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_3;
hipMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
double *q_4;
hipMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, hipMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hipLaunchKernelGGL(( hypterm_1) , dim3(gridconfig), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
hipLaunchKernelGGL(( hypterm_2) , dim3(gridconfig_2), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_3 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hipLaunchKernelGGL(( hypterm_3) , dim3(gridconfig_3), dim3(blockconfig), 0, 0, flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
hipMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, hipMemcpyDeviceToHost);
// Release the device buffers allocated above.
hipFree (flux_0); hipFree (flux_1); hipFree (flux_2); hipFree (flux_3); hipFree (flux_4);
hipFree (cons_1); hipFree (cons_2); hipFree (cons_3); hipFree (cons_4);
hipFree (q_1); hipFree (q_2); hipFree (q_3); hipFree (q_4);
}
| ae0cab5d74c3b4b56aaf470108e2f084359fb13a.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm_1 (double * __restrict__ flux_0, double * __restrict__ flux_1, double * __restrict__ flux_2, double * __restrict__ flux_3, double * __restrict__ flux_4, double * __restrict__ cons_1, double * __restrict__ cons_2, double * __restrict__ cons_3, double * __restrict__ cons_4, double * __restrict__ q_1, double * __restrict__ q_2, double * __restrict__ q_3, double * __restrict__ q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_0[k*M*N+j*N+i] = -((0.8*(cons_1[k*M*N+j*N+i+1]-cons_1[k*M*N+j*N+i-1])-0.2*(cons_1[k*M*N+j*N+i+2]-cons_1[k*M*N+j*N+i-2])+0.038*(cons_1[k*M*N+j*N+i+3]-cons_1[k*M*N+j*N+i-3])-0.0035*(cons_1[k*M*N+j*N+i+4]-cons_1[k*M*N+j*N+i-4]))*dxinv0);
flux_1[k*M*N+j*N+i] = -((0.8*(cons_1[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_1[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1]+(q_4[k*M*N+j*N+i+1]-q_4[k*M*N+j*N+i-1]))-0.2*(cons_1[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_1[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2]+(q_4[k*M*N+j*N+i+2]-q_4[k*M*N+j*N+i-2]))+0.038*(cons_1[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_1[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3]+(q_4[k*M*N+j*N+i+3]-q_4[k*M*N+j*N+i-3]))-0.0035*(cons_1[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_1[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]+(q_4[k*M*N+j*N+i+4]-q_4[k*M*N+j*N+i-4])))*dxinv0);
flux_2[k*M*N+j*N+i] = -((0.8*(cons_2[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_2[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1])-0.2*(cons_2[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_2[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2])+0.038*(cons_2[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_2[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3])-0.0035*(cons_2[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_2[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]))*dxinv0);
flux_3[k*M*N+j*N+i] = -((0.8*(cons_3[k*M*N+j*N+i+1]*q_1[k*M*N+j*N+i+1]-cons_3[k*M*N+j*N+i-1]*q_1[k*M*N+j*N+i-1])-0.2*(cons_3[k*M*N+j*N+i+2]*q_1[k*M*N+j*N+i+2]-cons_3[k*M*N+j*N+i-2]*q_1[k*M*N+j*N+i-2])+0.038*(cons_3[k*M*N+j*N+i+3]*q_1[k*M*N+j*N+i+3]-cons_3[k*M*N+j*N+i-3]*q_1[k*M*N+j*N+i-3])-0.0035*(cons_3[k*M*N+j*N+i+4]*q_1[k*M*N+j*N+i+4]-cons_3[k*M*N+j*N+i-4]*q_1[k*M*N+j*N+i-4]))*dxinv0);
flux_0[k*M*N+j*N+i] -= (0.8*(cons_2[k*M*N+(j+1)*N+i]-cons_2[k*M*N+(j-1)*N+i])-0.2*(cons_2[k*M*N+(j+2)*N+i]-cons_2[k*M*N+(j-2)*N+i])+0.038*(cons_2[k*M*N+(j+3)*N+i]-cons_2[k*M*N+(j-3)*N+i])-0.0035*(cons_2[k*M*N+(j+4)*N+i]-cons_2[k*M*N+(j-4)*N+i]))*dxinv1;
flux_1[k*M*N+j*N+i] -= (0.8*(cons_1[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_1[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i])-0.2*(cons_1[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_1[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i])+0.038*(cons_1[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_1[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i])-0.0035*(cons_1[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_1[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]))*dxinv1;
flux_2[k*M*N+j*N+i] -= (0.8*(cons_2[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_2[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i]+(q_4[k*M*N+(j+1)*N+i]-q_4[k*M*N+(j-1)*N+i]))-0.2*(cons_2[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_2[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i]+(q_4[k*M*N+(j+2)*N+i]-q_4[k*M*N+(j-2)*N+i]))+0.038*(cons_2[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_2[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i]+(q_4[k*M*N+(j+3)*N+i]-q_4[k*M*N+(j-3)*N+i]))-0.0035*(cons_2[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_2[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]+(q_4[k*M*N+(j+4)*N+i]-q_4[k*M*N+(j-4)*N+i])))*dxinv1;
flux_3[k*M*N+j*N+i] -= (0.8*(cons_3[k*M*N+(j+1)*N+i]*q_2[k*M*N+(j+1)*N+i]-cons_3[k*M*N+(j-1)*N+i]*q_2[k*M*N+(j-1)*N+i])-0.2*(cons_3[k*M*N+(j+2)*N+i]*q_2[k*M*N+(j+2)*N+i]-cons_3[k*M*N+(j-2)*N+i]*q_2[k*M*N+(j-2)*N+i])+0.038*(cons_3[k*M*N+(j+3)*N+i]*q_2[k*M*N+(j+3)*N+i]-cons_3[k*M*N+(j-3)*N+i]*q_2[k*M*N+(j-3)*N+i])-0.0035*(cons_3[k*M*N+(j+4)*N+i]*q_2[k*M*N+(j+4)*N+i]-cons_3[k*M*N+(j-4)*N+i]*q_2[k*M*N+(j-4)*N+i]))*dxinv1;
}
}
__global__ void hypterm_2 (double * __restrict__ flux_0, double * __restrict__ flux_1, double * __restrict__ flux_2, double * __restrict__ flux_3, double * __restrict__ flux_4, double * __restrict__ cons_1, double * __restrict__ cons_2, double * __restrict__ cons_3, double * __restrict__ cons_4, double * __restrict__ q_1, double * __restrict__ q_2, double * __restrict__ q_3, double * __restrict__ q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + 4*(int)(threadIdx.z);
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
flux_0[k*M*N+j*N+i] -= (0.8*(cons_3[(k+1)*M*N+j*N+i]-cons_3[(k-1)*M*N+j*N+i])-0.2*(cons_3[(k+2)*M*N+j*N+i]-cons_3[(k-2)*M*N+j*N+i])+0.038*(cons_3[(k+3)*M*N+j*N+i]-cons_3[(k-3)*M*N+j*N+i])-0.0035*(cons_3[(k+4)*M*N+j*N+i]-cons_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+1)*M*N+j*N+i] -= (0.8*(cons_3[((k+1)+1)*M*N+j*N+i]-cons_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_3[((k+1)+2)*M*N+j*N+i]-cons_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_3[((k+1)+3)*M*N+j*N+i]-cons_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+1)+4)*M*N+j*N+i]-cons_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+2)*M*N+j*N+i] -= (0.8*(cons_3[((k+2)+1)*M*N+j*N+i]-cons_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_3[((k+2)+2)*M*N+j*N+i]-cons_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_3[((k+2)+3)*M*N+j*N+i]-cons_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+2)+4)*M*N+j*N+i]-cons_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_0[(k+3)*M*N+j*N+i] -= (0.8*(cons_3[((k+3)+1)*M*N+j*N+i]-cons_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_3[((k+3)+2)*M*N+j*N+i]-cons_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_3[((k+3)+3)*M*N+j*N+i]-cons_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_3[((k+3)+4)*M*N+j*N+i]-cons_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
flux_1[k*M*N+j*N+i] -= (0.8*(cons_1[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_1[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i])-0.2*(cons_1[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_1[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i])+0.038*(cons_1[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_1[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i])-0.0035*(cons_1[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_1[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+1)*M*N+j*N+i] -= (0.8*(cons_1[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_1[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_1[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_1[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_1[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_1[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_1[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+2)*M*N+j*N+i] -= (0.8*(cons_1[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_1[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_1[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_1[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_1[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_1[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_1[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_1[(k+3)*M*N+j*N+i] -= (0.8*(cons_1[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_1[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_1[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_1[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_1[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_1[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_1[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_1[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
flux_2[k*M*N+j*N+i] -= (0.8*(cons_2[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_2[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i])-0.2*(cons_2[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_2[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i])+0.038*(cons_2[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_2[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i])-0.0035*(cons_2[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_2[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+1)*M*N+j*N+i] -= (0.8*(cons_2[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_2[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i])-0.2*(cons_2[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_2[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i])+0.038*(cons_2[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_2[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_2[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+2)*M*N+j*N+i] -= (0.8*(cons_2[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_2[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i])-0.2*(cons_2[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_2[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i])+0.038*(cons_2[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_2[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_2[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]))*dxinv2;
flux_2[(k+3)*M*N+j*N+i] -= (0.8*(cons_2[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_2[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i])-0.2*(cons_2[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_2[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i])+0.038*(cons_2[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_2[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i])-0.0035*(cons_2[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_2[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]))*dxinv2;
flux_3[k*M*N+j*N+i] -= (0.8*(cons_3[(k+1)*M*N+j*N+i]*q_3[(k+1)*M*N+j*N+i]-cons_3[(k-1)*M*N+j*N+i]*q_3[(k-1)*M*N+j*N+i]+(q_4[(k+1)*M*N+j*N+i]-q_4[(k-1)*M*N+j*N+i]))-0.2*(cons_3[(k+2)*M*N+j*N+i]*q_3[(k+2)*M*N+j*N+i]-cons_3[(k-2)*M*N+j*N+i]*q_3[(k-2)*M*N+j*N+i]+(q_4[(k+2)*M*N+j*N+i]-q_4[(k-2)*M*N+j*N+i]))+0.038*(cons_3[(k+3)*M*N+j*N+i]*q_3[(k+3)*M*N+j*N+i]-cons_3[(k-3)*M*N+j*N+i]*q_3[(k-3)*M*N+j*N+i]+(q_4[(k+3)*M*N+j*N+i]-q_4[(k-3)*M*N+j*N+i]))-0.0035*(cons_3[(k+4)*M*N+j*N+i]*q_3[(k+4)*M*N+j*N+i]-cons_3[(k-4)*M*N+j*N+i]*q_3[(k-4)*M*N+j*N+i]+(q_4[(k+4)*M*N+j*N+i]-q_4[(k-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+1)*M*N+j*N+i] -= (0.8*(cons_3[((k+1)+1)*M*N+j*N+i]*q_3[((k+1)+1)*M*N+j*N+i]-cons_3[((k+1)-1)*M*N+j*N+i]*q_3[((k+1)-1)*M*N+j*N+i]+(q_4[((k+1)+1)*M*N+j*N+i]-q_4[((k+1)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+1)+2)*M*N+j*N+i]*q_3[((k+1)+2)*M*N+j*N+i]-cons_3[((k+1)-2)*M*N+j*N+i]*q_3[((k+1)-2)*M*N+j*N+i]+(q_4[((k+1)+2)*M*N+j*N+i]-q_4[((k+1)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+1)+3)*M*N+j*N+i]*q_3[((k+1)+3)*M*N+j*N+i]-cons_3[((k+1)-3)*M*N+j*N+i]*q_3[((k+1)-3)*M*N+j*N+i]+(q_4[((k+1)+3)*M*N+j*N+i]-q_4[((k+1)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+1)+4)*M*N+j*N+i]*q_3[((k+1)+4)*M*N+j*N+i]-cons_3[((k+1)-4)*M*N+j*N+i]*q_3[((k+1)-4)*M*N+j*N+i]+(q_4[((k+1)+4)*M*N+j*N+i]-q_4[((k+1)-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+2)*M*N+j*N+i] -= (0.8*(cons_3[((k+2)+1)*M*N+j*N+i]*q_3[((k+2)+1)*M*N+j*N+i]-cons_3[((k+2)-1)*M*N+j*N+i]*q_3[((k+2)-1)*M*N+j*N+i]+(q_4[((k+2)+1)*M*N+j*N+i]-q_4[((k+2)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+2)+2)*M*N+j*N+i]*q_3[((k+2)+2)*M*N+j*N+i]-cons_3[((k+2)-2)*M*N+j*N+i]*q_3[((k+2)-2)*M*N+j*N+i]+(q_4[((k+2)+2)*M*N+j*N+i]-q_4[((k+2)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+2)+3)*M*N+j*N+i]*q_3[((k+2)+3)*M*N+j*N+i]-cons_3[((k+2)-3)*M*N+j*N+i]*q_3[((k+2)-3)*M*N+j*N+i]+(q_4[((k+2)+3)*M*N+j*N+i]-q_4[((k+2)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+2)+4)*M*N+j*N+i]*q_3[((k+2)+4)*M*N+j*N+i]-cons_3[((k+2)-4)*M*N+j*N+i]*q_3[((k+2)-4)*M*N+j*N+i]+(q_4[((k+2)+4)*M*N+j*N+i]-q_4[((k+2)-4)*M*N+j*N+i])))*dxinv2;
flux_3[(k+3)*M*N+j*N+i] -= (0.8*(cons_3[((k+3)+1)*M*N+j*N+i]*q_3[((k+3)+1)*M*N+j*N+i]-cons_3[((k+3)-1)*M*N+j*N+i]*q_3[((k+3)-1)*M*N+j*N+i]+(q_4[((k+3)+1)*M*N+j*N+i]-q_4[((k+3)-1)*M*N+j*N+i]))-0.2*(cons_3[((k+3)+2)*M*N+j*N+i]*q_3[((k+3)+2)*M*N+j*N+i]-cons_3[((k+3)-2)*M*N+j*N+i]*q_3[((k+3)-2)*M*N+j*N+i]+(q_4[((k+3)+2)*M*N+j*N+i]-q_4[((k+3)-2)*M*N+j*N+i]))+0.038*(cons_3[((k+3)+3)*M*N+j*N+i]*q_3[((k+3)+3)*M*N+j*N+i]-cons_3[((k+3)-3)*M*N+j*N+i]*q_3[((k+3)-3)*M*N+j*N+i]+(q_4[((k+3)+3)*M*N+j*N+i]-q_4[((k+3)-3)*M*N+j*N+i]))-0.0035*(cons_3[((k+3)+4)*M*N+j*N+i]*q_3[((k+3)+4)*M*N+j*N+i]-cons_3[((k+3)-4)*M*N+j*N+i]*q_3[((k+3)-4)*M*N+j*N+i]+(q_4[((k+3)+4)*M*N+j*N+i]-q_4[((k+3)-4)*M*N+j*N+i])))*dxinv2;
}
}
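// hypterm_3: energy-flux (flux_4) update over 2 consecutive k-planes; the
// casts below hard-code a 308x308x308 grid, so L = M = N = 308 is assumed.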
__global__ void hypterm_3 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ flux_in_4, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + 2*(int)(threadIdx.z);
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
double _t_5_;
double _t_1_;
double _t_2_;
double _t_3_;
double _t_4_;
double _t_0_;
double flux_4kc0jc0ic0 = 0;
double _t_11_;
double _t_7_;
double _t_10_;
double _t_8_;
double _t_9_;
double _t_6_;
double flux_4kp1jc0ic0 = 0;
double _t_17_;
double _t_13_;
double _t_14_;
double _t_15_;
double _t_16_;
double _t_12_;
double _t_23_;
double _t_19_;
double _t_20_;
double _t_21_;
double _t_22_;
double _t_18_;
double _t_29_;
double _t_34_;
double _t_25_;
double _t_27_;
double _t_31_;
double _t_32_;
double _t_26_;
double _t_33_;
double _t_28_;
double _t_35_;
double _t_24_;
double _t_30_;
_t_5_ = -cons_4[k][j][i-4] * q_1[k][j][i-4];
_t_5_ -= q_4[k][j][i-4] * q_1[k][j][i-4];
_t_5_ += cons_4[k][j][i+4] * q_1[k][j][i+4];
_t_5_ += q_4[k][j][i+4] * q_1[k][j][i+4];
_t_1_ = -0.0035 * _t_5_;
_t_2_ = -cons_4[k][j][i-1] * q_1[k][j][i-1];
_t_2_ -= q_4[k][j][i-1] * q_1[k][j][i-1];
_t_2_ += cons_4[k][j][i+1] * q_1[k][j][i+1];
_t_2_ += q_4[k][j][i+1] * q_1[k][j][i+1];
_t_1_ += 0.8 * _t_2_;
_t_3_ = -cons_4[k][j][i-2] * q_1[k][j][i-2];
_t_3_ -= q_4[k][j][i-2] * q_1[k][j][i-2];
_t_3_ += cons_4[k][j][i+2] * q_1[k][j][i+2];
_t_3_ += q_4[k][j][i+2] * q_1[k][j][i+2];
_t_1_ -= 0.2 * _t_3_;
_t_4_ = -cons_4[k][j][i-3] * q_1[k][j][i-3];
_t_4_ -= q_4[k][j][i-3] * q_1[k][j][i-3];
_t_4_ += cons_4[k][j][i+3] * q_1[k][j][i+3];
_t_4_ += q_4[k][j][i+3] * q_1[k][j][i+3];
_t_1_ += 0.038 * _t_4_;
_t_0_ = _t_1_ * dxinv0;
flux_4kc0jc0ic0 -= _t_0_;
_t_11_ = -cons_4[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_11_ -= q_4[k+1][j][i-4] * q_1[k+1][j][i-4];
_t_11_ += cons_4[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_11_ += q_4[k+1][j][i+4] * q_1[k+1][j][i+4];
_t_7_ = -0.0035 * _t_11_;
_t_10_ = -cons_4[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_10_ -= q_4[k+1][j][i-3] * q_1[k+1][j][i-3];
_t_10_ += cons_4[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_10_ += q_4[k+1][j][i+3] * q_1[k+1][j][i+3];
_t_7_ += 0.038 * _t_10_;
_t_8_ = -cons_4[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_8_ -= q_4[k+1][j][i-1] * q_1[k+1][j][i-1];
_t_8_ += cons_4[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_8_ += q_4[k+1][j][i+1] * q_1[k+1][j][i+1];
_t_7_ += 0.8 * _t_8_;
_t_9_ = -cons_4[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_9_ -= q_4[k+1][j][i-2] * q_1[k+1][j][i-2];
_t_9_ += cons_4[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_9_ += q_4[k+1][j][i+2] * q_1[k+1][j][i+2];
_t_7_ -= 0.2 * _t_9_;
_t_6_ = _t_7_ * dxinv0;
flux_4kp1jc0ic0 -= _t_6_;
_t_17_ = -cons_4[k][j-4][i] * q_2[k][j-4][i];
_t_17_ -= q_4[k][j-4][i] * q_2[k][j-4][i];
_t_17_ += cons_4[k][j+4][i] * q_2[k][j+4][i];
_t_17_ += q_4[k][j+4][i] * q_2[k][j+4][i];
_t_13_ = -0.0035 * _t_17_;
_t_14_ = -cons_4[k][j-1][i] * q_2[k][j-1][i];
_t_14_ -= q_4[k][j-1][i] * q_2[k][j-1][i];
_t_14_ += cons_4[k][j+1][i] * q_2[k][j+1][i];
_t_14_ += q_4[k][j+1][i] * q_2[k][j+1][i];
_t_13_ += 0.8 * _t_14_;
_t_15_ = -cons_4[k][j-2][i] * q_2[k][j-2][i];
_t_15_ -= q_4[k][j-2][i] * q_2[k][j-2][i];
_t_15_ += cons_4[k][j+2][i] * q_2[k][j+2][i];
_t_15_ += q_4[k][j+2][i] * q_2[k][j+2][i];
_t_13_ -= 0.2 * _t_15_;
_t_16_ = -cons_4[k][j-3][i] * q_2[k][j-3][i];
_t_16_ -= q_4[k][j-3][i] * q_2[k][j-3][i];
_t_16_ += cons_4[k][j+3][i] * q_2[k][j+3][i];
_t_16_ += q_4[k][j+3][i] * q_2[k][j+3][i];
_t_13_ += 0.038 * _t_16_;
_t_12_ = _t_13_ * dxinv1;
flux_4kc0jc0ic0 -= _t_12_;
_t_23_ = -cons_4[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_23_ -= q_4[k+1][j-4][i] * q_2[k+1][j-4][i];
_t_23_ += cons_4[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_23_ += q_4[k+1][j+4][i] * q_2[k+1][j+4][i];
_t_19_ = -0.0035 * _t_23_;
_t_20_ = -cons_4[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_20_ -= q_4[k+1][j-1][i] * q_2[k+1][j-1][i];
_t_20_ += cons_4[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_20_ += q_4[k+1][j+1][i] * q_2[k+1][j+1][i];
_t_19_ += 0.8 * _t_20_;
_t_21_ = -cons_4[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_21_ -= q_4[k+1][j-2][i] * q_2[k+1][j-2][i];
_t_21_ += cons_4[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_21_ += q_4[k+1][j+2][i] * q_2[k+1][j+2][i];
_t_19_ -= 0.2 * _t_21_;
_t_22_ = -cons_4[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_22_ -= q_4[k+1][j-3][i] * q_2[k+1][j-3][i];
_t_22_ += cons_4[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_22_ += q_4[k+1][j+3][i] * q_2[k+1][j+3][i];
_t_19_ += 0.038 * _t_22_;
_t_18_ = _t_19_ * dxinv1;
flux_4kp1jc0ic0 -= _t_18_;
_t_29_ = -cons_4[k-4][j][i] * q_3[k-4][j][i];
_t_29_ -= q_4[k-4][j][i] * q_3[k-4][j][i];
_t_29_ += cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_34_ = cons_4[k+4][j][i] * q_3[k+4][j][i];
_t_29_ += q_4[k+4][j][i] * q_3[k+4][j][i];
_t_34_ += q_4[k+4][j][i] * q_3[k+4][j][i];
_t_25_ = -0.0035 * _t_29_;
_t_34_ -= cons_4[k-2][j][i] * q_3[k-2][j][i];
_t_27_ = -cons_4[k-2][j][i] * q_3[k-2][j][i];
_t_27_ -= q_4[k-2][j][i] * q_3[k-2][j][i];
_t_34_ -= q_4[k-2][j][i] * q_3[k-2][j][i];
_t_31_ = 0.038 * _t_34_;
_t_27_ += cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_32_ = cons_4[k+2][j][i] * q_3[k+2][j][i];
_t_27_ += q_4[k+2][j][i] * q_3[k+2][j][i];
_t_25_ -= 0.2 * _t_27_;
_t_32_ += q_4[k+2][j][i] * q_3[k+2][j][i];
_t_32_ -= cons_4[k][j][i] * q_3[k][j][i];
_t_32_ -= q_4[k][j][i] * q_3[k][j][i];
_t_31_ += 0.8 * _t_32_;
_t_26_ = cons_4[k+1][j][i] * q_3[k+1][j][i];
_t_26_ += q_4[k+1][j][i] * q_3[k+1][j][i];
_t_26_ -= cons_4[k-1][j][i] * q_3[k-1][j][i];
_t_33_ = -cons_4[k-1][j][i] * q_3[k-1][j][i];
_t_26_ -= q_4[k-1][j][i] * q_3[k-1][j][i];
_t_25_ += 0.8 * _t_26_;
_t_33_ -= q_4[k-1][j][i] * q_3[k-1][j][i];
_t_33_ += cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_28_ = cons_4[k+3][j][i] * q_3[k+3][j][i];
_t_28_ += q_4[k+3][j][i] * q_3[k+3][j][i];
_t_33_ += q_4[k+3][j][i] * q_3[k+3][j][i];
_t_31_ -= 0.2 * _t_33_;
_t_28_ -= cons_4[k-3][j][i] * q_3[k-3][j][i];
_t_35_ = -cons_4[k-3][j][i] * q_3[k-3][j][i];
_t_28_ -= q_4[k-3][j][i] * q_3[k-3][j][i];
_t_25_ += 0.038 * _t_28_;
_t_35_ -= q_4[k-3][j][i] * q_3[k-3][j][i];
_t_35_ += cons_4[k+5][j][i] * q_3[k+5][j][i];
_t_35_ += q_4[k+5][j][i] * q_3[k+5][j][i];
_t_31_ -= 0.0035 * _t_35_;
_t_24_ = _t_25_ * dxinv2;
flux_4kc0jc0ic0 -= _t_24_;
flux_4[k][j][i] = flux_4kc0jc0ic0;
_t_30_ = _t_31_ * dxinv2;
flux_4kp1jc0ic0 -= _t_30_;
flux_4[k+1][j][i] = flux_4kp1jc0ic0;
}
}
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 4, 4);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm_1 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
hypterm_2 <<<gridconfig_2, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
dim3 gridconfig_3 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hypterm_3 <<<gridconfig_3, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
// Release the device buffers allocated above.
cudaFree (flux_0); cudaFree (flux_1); cudaFree (flux_2); cudaFree (flux_3); cudaFree (flux_4);
cudaFree (cons_1); cudaFree (cons_2); cudaFree (cons_3); cudaFree (cons_4);
cudaFree (q_1); cudaFree (q_2); cudaFree (q_3); cudaFree (q_4);
}
|
84370c0830a42d2ca5dbc05e81c7287189a8214e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper
//#include <cutil.h>
#include <iostream>
#include <fstream>
#include "cudacommon.h"
#include "ResultDatabase.h"
#include "OptionParser.h"
#define SEED 7
/*
* Options
*
*/
#define GAMMA 1.4f
#define iterations 10
// #ifndef block_length
// #define block_length 192
// #endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
/*
* not options
*/
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_2)
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
// #if block_length > 128
// #warning "the kernels may fail to launch on some systems if the block length is too large"
// #endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
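// Solution storage is structure-of-arrays: component v of element i lives at
// variables[i + v*nelr], so consecutive threads access consecutive addresses
// (coalesced) in the kernels below.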
float kernelTime = 0.0f;
float transferTime = 0.0f;
hipEvent_t start, stop;
float elapsed;
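// kernelTime and transferTime accumulate seconds; hipEventElapsedTime reports
// milliseconds, hence the 1.e-3 factors below.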
/*
* Generic functions
*/
template <typename T>
T* alloc(int N)
{
T* t;
CUDA_SAFE_CALL(hipMalloc((void**)&t, sizeof(T)*N));
return t;
}
template <typename T>
void dealloc(T* array)
{
CUDA_SAFE_CALL(hipFree((void*)array));
}
template <typename T>
void copy(T* dst, T* src, int N)
{
hipEventRecord(start, 0);
CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToDevice));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
transferTime += elapsed * 1.e-3;
}
template <typename T>
void upload(T* dst, T* src, int N)
{
hipEventRecord(start, 0);
CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyHostToDevice));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
transferTime += elapsed * 1.e-3;
}
template <typename T>
void download(T* dst, T* src, int N)
{
hipEventRecord(start, 0);
CUDA_SAFE_CALL(hipMemcpy((void*)dst, (void*)src, N*sizeof(T), hipMemcpyDeviceToHost));
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
transferTime += elapsed * 1.e-3;
}
void dump(float* variables, int nel, int nelr)
{
float* h_variables = new float[nelr*NVAR];
download(h_variables, variables, nelr*NVAR);
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++)
{
for(int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
}
delete[] h_variables;
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ float ff_variable[NVAR];
__constant__ float3 ff_flux_contribution_momentum_x[1];
__constant__ float3 ff_flux_contribution_momentum_y[1];
__constant__ float3 ff_flux_contribution_momentum_z[1];
__constant__ float3 ff_flux_contribution_density_energy[1];
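// Far-field (free-stream) state and its precomputed flux contributions;
// presumably filled from the host with hipMemcpyToSymbol during setup.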
__global__ void cuda_initialize_variables(int nelr, float* variables)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int j = 0; j < NVAR; j++)
variables[i + j*nelr] = ff_variable[j];
}
void initialize_variables(int nelr, float* variables)
{
dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( cuda_initialize_variables), dim3(Dg), dim3(Db), 0, 0, nelr, variables);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
kernelTime += elapsed * 1.e-3;
CHECK_CUDA_ERROR();
}
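// compute_flux_contribution builds the inviscid flux: the fc_momentum_* rows
// form the tensor with entries rho*v_i*v_j + p*delta_ij, and
// fc_density_energy is (density_energy + p) * velocity.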
__device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
__device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
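// Ideal-gas relations used by the next two helpers:
// p = (GAMMA-1)*(E - 0.5*rho*|v|^2) and c = sqrt(GAMMA*p/rho).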
__device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
float density = variables[i + VAR_DENSITY*nelr];
float3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
float3 velocity; compute_velocity(density, momentum, velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c), but time stepping later divides by the area anyway, so that division is folded in here all at once
step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
}
void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( cuda_compute_step_factor), dim3(Dg), dim3(Db), 0, 0, nelr, variables, areas, step_factors);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
kernelTime += elapsed * 1.e-3;
CHECK_CUDA_ERROR();
}
/*
*
*
*/
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
float density_i = variables[i + VAR_DENSITY*nelr];
float3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = float(0.0f);
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
normal.x = normals[i + (j + 0*NNB)*nelr];
normal.y = normals[i + (j + 1*NNB)*nelr];
normal.z = normals[i + (j + 2*NNB)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY*nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
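// (Rusanov-style dissipation: scaled by the face area normal_len and a local
// maximum-wavespeed estimate 0.5*(|v_i| + |v_nb| + c_i + c_nb), damped by
// smoothing_coefficient.)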
factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
else if(nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x*pressure_i;
flux_i_momentum.y += normal.y*pressure_i;
flux_i_momentum.z += normal.z*pressure_i;
}
else if(nb == -2) // a far field boundary
{
factor = float(0.5f)*normal.x;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
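// Layout note: every per-element array in this file is structure-of-arrays,
// variable-major, so the value of variable v for element i lives at
// i + v*nelr and consecutive threads issue coalesced loads. The same
// arithmetic as a hypothetical helper (the kernels inline it directly):
__host__ __device__ static inline int soa_index(int element, int var, int nelr)
{
return element + var * nelr;
}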
void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( cuda_compute_flux), dim3(Dg),dim3(Db), 0, 0, nelr, elements_surrounding_elements, normals, variables, fluxes);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
kernelTime += elapsed * 1.e-3;
CHECK_CUDA_ERROR();
}
__global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
float factor = step_factors[i]/float(RK+1-j);
variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}
void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( cuda_time_step), dim3(Dg),dim3(Db), 0, 0, j, nelr, old_variables, variables, step_factors, fluxes);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
kernelTime += elapsed * 1.e-3;
CHECK_CUDA_ERROR();
}
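// Note on the update rule: cfd() calls this RK times per iteration with
// j = 0..RK-1, always starting from old_variables, so the flux is scaled by
// dt/(RK+1-j), i.e. dt/4, dt/3, dt/2 for RK = 3. The divisors never reach 1,
// so this is a damped pseudo-time smoothing loop rather than a classical
// Runge-Kutta tableau.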
void addBenchmarkSpecOptions(OptionParser &op) {
}
void cfd(ResultDatabase &resultDB, OptionParser &op);
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
printf("Running CFDSolver\n");
bool quiet = op.getOptionBool("quiet");
if(!quiet) {
printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
}
hipEventCreate(&start);
hipEventCreate(&stop);
int passes = op.getOptionInt("passes");
for(int i = 0; i < passes; i++) {
kernelTime = 0.0f;
transferTime = 0.0f;
if(!quiet) {
printf("Pass %d:\n", i);
}
cfd(resultDB, op);
if(!quiet) {
printf("Done.\n");
}
}
}
void cfd(ResultDatabase &resultDB, OptionParser &op)
{
// set far field conditions and load them into constant memory on the gpu
{
float h_ff_variable[NVAR];
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach)*ff_speed_of_sound;
float3 ff_velocity;
ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));
float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
float3 h_ff_flux_contribution_momentum_x;
float3 h_ff_flux_contribution_momentum_y;
float3 h_ff_flux_contribution_momentum_z;
float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
hipEventRecord(start, 0);
CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) );
CUDA_SAFE_CALL( hipMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) );
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
transferTime += elapsed * 1.e-3;
}
int nel;
int nelr;
// read in domain geometry
float* areas;
int* elements_surrounding_elements;
float* normals;
{
string inputFile = op.getOptionString("inputFile");
std::ifstream file(inputFile.c_str());
if(inputFile != "") {
file >> nel;
} else {
int problemSizes[4] = {97000, 200000, 40000000, 60000000};
nel = problemSizes[op.getOptionInt("size") - 1];
}
nelr = BLOCK_SIZE_0*((nel / BLOCK_SIZE_0 )+ ::min(1, nel % BLOCK_SIZE_0));
float* h_areas = new float[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
float* h_normals = new float[nelr*NDIM*NNB];
srand(SEED);
// read in data
for(int i = 0; i < nel; i++)
{
if(inputFile != "") {
file >> h_areas[i];
} else {
h_areas[i] = 1.0 * rand() / RAND_MAX;
}
for(int j = 0; j < NNB; j++) // NNB is always 4
{
if(inputFile != "") {
file >> h_elements_surrounding_elements[i + j*nelr];
} else {
int val = i + (rand() % 20) - 10;
h_elements_surrounding_elements[i + j * nelr] = val;
}
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; // the input uses Fortran (1-based) numbering
for(int k = 0; k < NDIM; k++) // NDIM is always 3
{
if(inputFile != "") {
file >> h_normals[i + (j + k*NNB)*nelr];
} else {
h_normals[i + (j + k*NNB)*nelr] = 1.0 * rand() / RAND_MAX - 0.5;
}
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
}
}
}
// fill in remaining data
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; // copy the last element's normals into padding element i
}
}
areas = alloc<float>(nelr);
upload<float>(areas, h_areas, nelr);
elements_surrounding_elements = alloc<int>(nelr*NNB);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
normals = alloc<float>(nelr*NDIM*NNB);
upload<float>(normals, h_normals, nelr*NDIM*NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
}
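// Padding rationale: nelr rounds nel up to the next multiple of BLOCK_SIZE_0,
// so every kernel can launch an exact grid (nelr / block size) without a
// per-thread bounds check, and the padded tail replicates element nel-1 so the
// extra work stays numerically harmless. An equivalent round-up, as a sketch:
//   int rounded = BLOCK_SIZE_0 * ((nel + BLOCK_SIZE_0 - 1) / BLOCK_SIZE_0);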
// Create arrays and set initial conditions
float* variables = alloc<float>(nelr*NVAR);
initialize_variables(nelr, variables);
float* old_variables = alloc<float>(nelr*NVAR);
float* fluxes = alloc<float>(nelr*NVAR);
float* step_factors = alloc<float>(nelr);
// make sure all memory is really allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
hipMemset( (void*) step_factors, 0, sizeof(float)*nelr );
// make sure CUDA isn't still doing something before we start timing
hipDeviceSynchronize();
// these need to be computed the first time in order to compute time step
// unsigned int timer = 0;
// CUT_SAFE_CALL( cutCreateTimer( &timer));
// CUT_SAFE_CALL( cutStartTimer( timer));
// Begin iterations
for(int i = 0; i < iterations; i++)
{
copy<float>(old_variables, variables, nelr*NVAR);
// for the first iteration we compute the time step
compute_step_factor(nelr, variables, areas, step_factors);
CHECK_CUDA_ERROR();
for(int j = 0; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
CHECK_CUDA_ERROR();
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
CHECK_CUDA_ERROR();
}
}
hipDeviceSynchronize();
// CUT_SAFE_CALL( cutStopTimer(timer) );
if(op.getOptionBool("verbose")) {
dump(variables, nel, nelr);
}
dealloc<float>(areas);
dealloc<int>(elements_surrounding_elements);
dealloc<float>(normals);
dealloc<float>(variables);
dealloc<float>(old_variables);
dealloc<float>(fluxes);
dealloc<float>(step_factors);
char atts[1024];
sprintf(atts, "numelements:%d", nel);
resultDB.AddResult("cfd_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("cfd_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("cfd_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
}
| 84370c0830a42d2ca5dbc05e81c7287189a8214e.cu | // Copyright 2009, Andrew Corrigan, [email protected]
// This code is from the AIAA-2009-4001 paper
//#include <cutil.h>
#include <iostream>
#include <fstream>
#include "cudacommon.h"
#include "ResultDatabase.h"
#include "OptionParser.h"
#define SEED 7
/*
* Options
*
*/
#define GAMMA 1.4f
#define iterations 10
// #ifndef block_length
// #define block_length 192
// #endif
#define NDIM 3
#define NNB 4
#define RK 3 // 3rd order RK
#define ff_mach 1.2f
#define deg_angle_of_attack 0.0f
/*
* not options
*/
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE_0 RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE_0 RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_0 RD_WG_SIZE
#else
#define BLOCK_SIZE_0 192
#endif
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_1 RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_1 RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_1 RD_WG_SIZE
#else
#define BLOCK_SIZE_1 192
#endif
#ifdef RD_WG_SIZE_2_0
#define BLOCK_SIZE_2 RD_WG_SIZE_2_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_2 RD_WG_SIZE_2
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_2 RD_WG_SIZE
#else
#define BLOCK_SIZE_2 192
#endif
#ifdef RD_WG_SIZE_3_0
#define BLOCK_SIZE_3 RD_WG_SIZE_3_0
#elif defined(RD_WG_SIZE_3)
#define BLOCK_SIZE_3 RD_WG_SIZE_3
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_3 RD_WG_SIZE
#else
#define BLOCK_SIZE_3 192
#endif
#ifdef RD_WG_SIZE_4_0
#define BLOCK_SIZE_4 RD_WG_SIZE_4_0
#elif defined(RD_WG_SIZE_4)
#define BLOCK_SIZE_4 RD_WG_SIZE_4
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_4 RD_WG_SIZE
#else
#define BLOCK_SIZE_4 192
#endif
// #if block_length > 128
// #warning "the kernels may fail too launch on some systems if the block length is too large"
// #endif
#define VAR_DENSITY 0
#define VAR_MOMENTUM 1
#define VAR_DENSITY_ENERGY (VAR_MOMENTUM+NDIM)
#define NVAR (VAR_DENSITY_ENERGY+1)
float kernelTime = 0.0f;
float transferTime = 0.0f;
cudaEvent_t start, stop;
float elapsed;
/*
* Generic functions
*/
template <typename T>
T* alloc(int N)
{
T* t;
CUDA_SAFE_CALL(cudaMalloc((void**)&t, sizeof(T)*N));
return t;
}
template <typename T>
void dealloc(T* array)
{
CUDA_SAFE_CALL(cudaFree((void*)array));
}
template <typename T>
void copy(T* dst, T* src, int N)
{
cudaEventRecord(start, 0);
CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToDevice));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
transferTime += elapsed * 1.e-3;
}
template <typename T>
void upload(T* dst, T* src, int N)
{
cudaEventRecord(start, 0);
CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyHostToDevice));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
transferTime += elapsed * 1.e-3;
}
template <typename T>
void download(T* dst, T* src, int N)
{
cudaEventRecord(start, 0);
CUDA_SAFE_CALL(cudaMemcpy((void*)dst, (void*)src, N*sizeof(T), cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
transferTime += elapsed * 1.e-3;
}
void dump(float* variables, int nel, int nelr)
{
float* h_variables = new float[nelr*NVAR];
download(h_variables, variables, nelr*NVAR);
{
std::ofstream file("density");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY*nelr] << std::endl;
}
{
std::ofstream file("momentum");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++)
{
for(int j = 0; j != NDIM; j++)
file << h_variables[i + (VAR_MOMENTUM+j)*nelr] << " ";
file << std::endl;
}
}
{
std::ofstream file("density_energy");
file << nel << " " << nelr << std::endl;
for(int i = 0; i < nel; i++) file << h_variables[i + VAR_DENSITY_ENERGY*nelr] << std::endl;
}
delete[] h_variables;
}
/*
* Element-based Cell-centered FVM solver functions
*/
__constant__ float ff_variable[NVAR];
__constant__ float3 ff_flux_contribution_momentum_x[1];
__constant__ float3 ff_flux_contribution_momentum_y[1];
__constant__ float3 ff_flux_contribution_momentum_z[1];
__constant__ float3 ff_flux_contribution_density_energy[1];
__global__ void cuda_initialize_variables(int nelr, float* variables)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
for(int j = 0; j < NVAR; j++)
variables[i + j*nelr] = ff_variable[j];
}
void initialize_variables(int nelr, float* variables)
{
dim3 Dg(nelr / BLOCK_SIZE_1), Db(BLOCK_SIZE_1);
cudaEventRecord(start, 0);
cuda_initialize_variables<<<Dg, Db>>>(nelr, variables);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
kernelTime += elapsed * 1.e-3;
CHECK_CUDA_ERROR();
}
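// Timing idiom shared by every wrapper in this file: bracket the launch with
// the global start/stop events, cudaEventSynchronize(stop) to wait for the
// GPU, then cudaEventElapsedTime reports milliseconds, which the 1.e-3 factor
// accumulates into kernelTime (or transferTime) as seconds.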
__device__ __host__ inline void compute_flux_contribution(float& density, float3& momentum, float& density_energy, float& pressure, float3& velocity, float3& fc_momentum_x, float3& fc_momentum_y, float3& fc_momentum_z, float3& fc_density_energy)
{
fc_momentum_x.x = velocity.x*momentum.x + pressure;
fc_momentum_x.y = velocity.x*momentum.y;
fc_momentum_x.z = velocity.x*momentum.z;
fc_momentum_y.x = fc_momentum_x.y;
fc_momentum_y.y = velocity.y*momentum.y + pressure;
fc_momentum_y.z = velocity.y*momentum.z;
fc_momentum_z.x = fc_momentum_x.z;
fc_momentum_z.y = fc_momentum_y.z;
fc_momentum_z.z = velocity.z*momentum.z + pressure;
float de_p = density_energy+pressure;
fc_density_energy.x = velocity.x*de_p;
fc_density_energy.y = velocity.y*de_p;
fc_density_energy.z = velocity.z*de_p;
}
__device__ inline void compute_velocity(float& density, float3& momentum, float3& velocity)
{
velocity.x = momentum.x / density;
velocity.y = momentum.y / density;
velocity.z = momentum.z / density;
}
__device__ inline float compute_speed_sqd(float3& velocity)
{
return velocity.x*velocity.x + velocity.y*velocity.y + velocity.z*velocity.z;
}
__device__ inline float compute_pressure(float& density, float& density_energy, float& speed_sqd)
{
return (float(GAMMA)-float(1.0f))*(density_energy - float(0.5f)*density*speed_sqd);
}
__device__ inline float compute_speed_of_sound(float& density, float& pressure)
{
return sqrtf(float(GAMMA)*pressure/density);
}
__global__ void cuda_compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
float density = variables[i + VAR_DENSITY*nelr];
float3 momentum;
momentum.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy = variables[i + VAR_DENSITY_ENERGY*nelr];
float3 velocity; compute_velocity(density, momentum, velocity);
float speed_sqd = compute_speed_sqd(velocity);
float pressure = compute_pressure(density, density_energy, speed_sqd);
float speed_of_sound = compute_speed_of_sound(density, pressure);
// dt = float(0.5f) * sqrtf(areas[i]) / (||v|| + c) ... but time stepping would later need to divide this by the area, so we do it all at once here
step_factors[i] = float(0.5f) / (sqrtf(areas[i]) * (sqrtf(speed_sqd) + speed_of_sound));
}
void compute_step_factor(int nelr, float* variables, float* areas, float* step_factors)
{
dim3 Dg(nelr / BLOCK_SIZE_2), Db(BLOCK_SIZE_2);
cudaEventRecord(start, 0);
cuda_compute_step_factor<<<Dg, Db>>>(nelr, variables, areas, step_factors);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
kernelTime += elapsed * 1.e-3;
CHECK_CUDA_ERROR();
}
/*
*
*
*/
__global__ void cuda_compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
const float smoothing_coefficient = float(0.2f);
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
int j, nb;
float3 normal; float normal_len;
float factor;
float density_i = variables[i + VAR_DENSITY*nelr];
float3 momentum_i;
momentum_i.x = variables[i + (VAR_MOMENTUM+0)*nelr];
momentum_i.y = variables[i + (VAR_MOMENTUM+1)*nelr];
momentum_i.z = variables[i + (VAR_MOMENTUM+2)*nelr];
float density_energy_i = variables[i + VAR_DENSITY_ENERGY*nelr];
float3 velocity_i; compute_velocity(density_i, momentum_i, velocity_i);
float speed_sqd_i = compute_speed_sqd(velocity_i);
float speed_i = sqrtf(speed_sqd_i);
float pressure_i = compute_pressure(density_i, density_energy_i, speed_sqd_i);
float speed_of_sound_i = compute_speed_of_sound(density_i, pressure_i);
float3 flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z;
float3 flux_contribution_i_density_energy;
compute_flux_contribution(density_i, momentum_i, density_energy_i, pressure_i, velocity_i, flux_contribution_i_momentum_x, flux_contribution_i_momentum_y, flux_contribution_i_momentum_z, flux_contribution_i_density_energy);
float flux_i_density = float(0.0f);
float3 flux_i_momentum;
flux_i_momentum.x = float(0.0f);
flux_i_momentum.y = float(0.0f);
flux_i_momentum.z = float(0.0f);
float flux_i_density_energy = float(0.0f);
float3 velocity_nb;
float density_nb, density_energy_nb;
float3 momentum_nb;
float3 flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z;
float3 flux_contribution_nb_density_energy;
float speed_sqd_nb, speed_of_sound_nb, pressure_nb;
#pragma unroll
for(j = 0; j < NNB; j++)
{
nb = elements_surrounding_elements[i + j*nelr];
normal.x = normals[i + (j + 0*NNB)*nelr];
normal.y = normals[i + (j + 1*NNB)*nelr];
normal.z = normals[i + (j + 2*NNB)*nelr];
normal_len = sqrtf(normal.x*normal.x + normal.y*normal.y + normal.z*normal.z);
if(nb >= 0) // a legitimate neighbor
{
density_nb = variables[nb + VAR_DENSITY*nelr];
momentum_nb.x = variables[nb + (VAR_MOMENTUM+0)*nelr];
momentum_nb.y = variables[nb + (VAR_MOMENTUM+1)*nelr];
momentum_nb.z = variables[nb + (VAR_MOMENTUM+2)*nelr];
density_energy_nb = variables[nb + VAR_DENSITY_ENERGY*nelr];
compute_velocity(density_nb, momentum_nb, velocity_nb);
speed_sqd_nb = compute_speed_sqd(velocity_nb);
pressure_nb = compute_pressure(density_nb, density_energy_nb, speed_sqd_nb);
speed_of_sound_nb = compute_speed_of_sound(density_nb, pressure_nb);
compute_flux_contribution(density_nb, momentum_nb, density_energy_nb, pressure_nb, velocity_nb, flux_contribution_nb_momentum_x, flux_contribution_nb_momentum_y, flux_contribution_nb_momentum_z, flux_contribution_nb_density_energy);
// artificial viscosity
factor = -normal_len*smoothing_coefficient*float(0.5f)*(speed_i + sqrtf(speed_sqd_nb) + speed_of_sound_i + speed_of_sound_nb);
flux_i_density += factor*(density_i-density_nb);
flux_i_density_energy += factor*(density_energy_i-density_energy_nb);
flux_i_momentum.x += factor*(momentum_i.x-momentum_nb.x);
flux_i_momentum.y += factor*(momentum_i.y-momentum_nb.y);
flux_i_momentum.z += factor*(momentum_i.z-momentum_nb.z);
// accumulate cell-centered fluxes
factor = float(0.5f)*normal.x;
flux_i_density += factor*(momentum_nb.x+momentum_i.x);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.x+flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.x+flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.x+flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(momentum_nb.y+momentum_i.y);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.y+flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.y+flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.y+flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(momentum_nb.z+momentum_i.z);
flux_i_density_energy += factor*(flux_contribution_nb_density_energy.z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(flux_contribution_nb_momentum_x.z+flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(flux_contribution_nb_momentum_y.z+flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(flux_contribution_nb_momentum_z.z+flux_contribution_i_momentum_z.z);
}
else if(nb == -1) // a wing boundary
{
flux_i_momentum.x += normal.x*pressure_i;
flux_i_momentum.y += normal.y*pressure_i;
flux_i_momentum.z += normal.z*pressure_i;
}
else if(nb == -2) // a far field boundary
{
factor = float(0.5f)*normal.x;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+0]+momentum_i.x);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].x+flux_contribution_i_density_energy.x);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].x + flux_contribution_i_momentum_x.x);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].x + flux_contribution_i_momentum_y.x);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].x + flux_contribution_i_momentum_z.x);
factor = float(0.5f)*normal.y;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+1]+momentum_i.y);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].y+flux_contribution_i_density_energy.y);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].y + flux_contribution_i_momentum_x.y);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].y + flux_contribution_i_momentum_y.y);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].y + flux_contribution_i_momentum_z.y);
factor = float(0.5f)*normal.z;
flux_i_density += factor*(ff_variable[VAR_MOMENTUM+2]+momentum_i.z);
flux_i_density_energy += factor*(ff_flux_contribution_density_energy[0].z+flux_contribution_i_density_energy.z);
flux_i_momentum.x += factor*(ff_flux_contribution_momentum_x[0].z + flux_contribution_i_momentum_x.z);
flux_i_momentum.y += factor*(ff_flux_contribution_momentum_y[0].z + flux_contribution_i_momentum_y.z);
flux_i_momentum.z += factor*(ff_flux_contribution_momentum_z[0].z + flux_contribution_i_momentum_z.z);
}
}
fluxes[i + VAR_DENSITY*nelr] = flux_i_density;
fluxes[i + (VAR_MOMENTUM+0)*nelr] = flux_i_momentum.x;
fluxes[i + (VAR_MOMENTUM+1)*nelr] = flux_i_momentum.y;
fluxes[i + (VAR_MOMENTUM+2)*nelr] = flux_i_momentum.z;
fluxes[i + VAR_DENSITY_ENERGY*nelr] = flux_i_density_energy;
}
void compute_flux(int nelr, int* elements_surrounding_elements, float* normals, float* variables, float* fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_3), Db(BLOCK_SIZE_3);
cudaEventRecord(start, 0);
cuda_compute_flux<<<Dg,Db>>>(nelr, elements_surrounding_elements, normals, variables, fluxes);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
kernelTime += elapsed * 1.e-3;
CHECK_CUDA_ERROR();
}
__global__ void cuda_time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
const int i = (blockDim.x*blockIdx.x + threadIdx.x);
float factor = step_factors[i]/float(RK+1-j);
variables[i + VAR_DENSITY*nelr] = old_variables[i + VAR_DENSITY*nelr] + factor*fluxes[i + VAR_DENSITY*nelr];
variables[i + VAR_DENSITY_ENERGY*nelr] = old_variables[i + VAR_DENSITY_ENERGY*nelr] + factor*fluxes[i + VAR_DENSITY_ENERGY*nelr];
variables[i + (VAR_MOMENTUM+0)*nelr] = old_variables[i + (VAR_MOMENTUM+0)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+0)*nelr];
variables[i + (VAR_MOMENTUM+1)*nelr] = old_variables[i + (VAR_MOMENTUM+1)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+1)*nelr];
variables[i + (VAR_MOMENTUM+2)*nelr] = old_variables[i + (VAR_MOMENTUM+2)*nelr] + factor*fluxes[i + (VAR_MOMENTUM+2)*nelr];
}
void time_step(int j, int nelr, float* old_variables, float* variables, float* step_factors, float* fluxes)
{
dim3 Dg(nelr / BLOCK_SIZE_4), Db(BLOCK_SIZE_4);
cudaEventRecord(start, 0);
cuda_time_step<<<Dg,Db>>>(j, nelr, old_variables, variables, step_factors, fluxes);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
kernelTime += elapsed * 1.e-3;
CHECK_CUDA_ERROR();
}
void addBenchmarkSpecOptions(OptionParser &op) {
}
void cfd(ResultDatabase &resultDB, OptionParser &op);
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op) {
printf("Running CFDSolver\n");
bool quiet = op.getOptionBool("quiet");
if(!quiet) {
printf("WG size of kernel:initialize = %d, WG size of kernel:compute_step_factor = %d, WG size of kernel:compute_flux = %d, WG size of kernel:time_step = %d\n", BLOCK_SIZE_1, BLOCK_SIZE_2, BLOCK_SIZE_3, BLOCK_SIZE_4);
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
int passes = op.getOptionInt("passes");
for(int i = 0; i < passes; i++) {
kernelTime = 0.0f;
transferTime = 0.0f;
if(!quiet) {
printf("Pass %d:\n", i);
}
cfd(resultDB, op);
if(!quiet) {
printf("Done.\n");
}
}
}
void cfd(ResultDatabase &resultDB, OptionParser &op)
{
// set far field conditions and load them into constant memory on the gpu
{
float h_ff_variable[NVAR];
const float angle_of_attack = float(3.1415926535897931 / 180.0f) * float(deg_angle_of_attack);
h_ff_variable[VAR_DENSITY] = float(1.4);
float ff_pressure = float(1.0f);
float ff_speed_of_sound = sqrt(GAMMA*ff_pressure / h_ff_variable[VAR_DENSITY]);
float ff_speed = float(ff_mach)*ff_speed_of_sound;
float3 ff_velocity;
ff_velocity.x = ff_speed*float(cos((float)angle_of_attack));
ff_velocity.y = ff_speed*float(sin((float)angle_of_attack));
ff_velocity.z = 0.0f;
h_ff_variable[VAR_MOMENTUM+0] = h_ff_variable[VAR_DENSITY] * ff_velocity.x;
h_ff_variable[VAR_MOMENTUM+1] = h_ff_variable[VAR_DENSITY] * ff_velocity.y;
h_ff_variable[VAR_MOMENTUM+2] = h_ff_variable[VAR_DENSITY] * ff_velocity.z;
h_ff_variable[VAR_DENSITY_ENERGY] = h_ff_variable[VAR_DENSITY]*(float(0.5f)*(ff_speed*ff_speed)) + (ff_pressure / float(GAMMA-1.0f));
float3 h_ff_momentum;
h_ff_momentum.x = *(h_ff_variable+VAR_MOMENTUM+0);
h_ff_momentum.y = *(h_ff_variable+VAR_MOMENTUM+1);
h_ff_momentum.z = *(h_ff_variable+VAR_MOMENTUM+2);
float3 h_ff_flux_contribution_momentum_x;
float3 h_ff_flux_contribution_momentum_y;
float3 h_ff_flux_contribution_momentum_z;
float3 h_ff_flux_contribution_density_energy;
compute_flux_contribution(h_ff_variable[VAR_DENSITY], h_ff_momentum, h_ff_variable[VAR_DENSITY_ENERGY], ff_pressure, ff_velocity, h_ff_flux_contribution_momentum_x, h_ff_flux_contribution_momentum_y, h_ff_flux_contribution_momentum_z, h_ff_flux_contribution_density_energy);
// copy far field conditions to the gpu
cudaEventRecord(start, 0);
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_variable, h_ff_variable, NVAR*sizeof(float)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_x, &h_ff_flux_contribution_momentum_x, sizeof(float3)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_y, &h_ff_flux_contribution_momentum_y, sizeof(float3)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_momentum_z, &h_ff_flux_contribution_momentum_z, sizeof(float3)) );
CUDA_SAFE_CALL( cudaMemcpyToSymbol(ff_flux_contribution_density_energy, &h_ff_flux_contribution_density_energy, sizeof(float3)) );
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
transferTime += elapsed * 1.e-3;
}
int nel;
int nelr;
// read in domain geometry
float* areas;
int* elements_surrounding_elements;
float* normals;
{
string inputFile = op.getOptionString("inputFile");
std::ifstream file(inputFile.c_str());
if(inputFile != "") {
file >> nel;
} else {
int problemSizes[4] = {97000, 200000, 40000000, 60000000};
nel = problemSizes[op.getOptionInt("size") - 1];
}
nelr = BLOCK_SIZE_0*((nel / BLOCK_SIZE_0 )+ std::min(1, nel % BLOCK_SIZE_0));
float* h_areas = new float[nelr];
int* h_elements_surrounding_elements = new int[nelr*NNB];
float* h_normals = new float[nelr*NDIM*NNB];
srand(SEED);
// read in data
for(int i = 0; i < nel; i++)
{
if(inputFile != "") {
file >> h_areas[i];
} else {
h_areas[i] = 1.0 * rand() / RAND_MAX;
}
for(int j = 0; j < NNB; j++) // NNB is always 4
{
if(inputFile != "") {
file >> h_elements_surrounding_elements[i + j*nelr];
} else {
int val = i + (rand() % 20) - 10;
h_elements_surrounding_elements[i + j * nelr] = val;
}
if(h_elements_surrounding_elements[i+j*nelr] < 0) h_elements_surrounding_elements[i+j*nelr] = -1;
h_elements_surrounding_elements[i + j*nelr]--; // the input uses Fortran (1-based) numbering
for(int k = 0; k < NDIM; k++) // NDIM is always 3
{
if(inputFile != "") {
file >> h_normals[i + (j + k*NNB)*nelr];
} else {
h_normals[i + (j + k*NNB)*nelr] = 1.0 * rand() / RAND_MAX - 0.5;
}
h_normals[i + (j + k*NNB)*nelr] = -h_normals[i + (j + k*NNB)*nelr];
}
}
}
// fill in remaining data
int last = nel-1;
for(int i = nel; i < nelr; i++)
{
h_areas[i] = h_areas[last];
for(int j = 0; j < NNB; j++)
{
// duplicate the last element
h_elements_surrounding_elements[i + j*nelr] = h_elements_surrounding_elements[last + j*nelr];
for(int k = 0; k < NDIM; k++) h_normals[i + (j + k*NNB)*nelr] = h_normals[last + (j + k*NNB)*nelr]; // copy the last element's normals into padding element i
}
}
areas = alloc<float>(nelr);
upload<float>(areas, h_areas, nelr);
elements_surrounding_elements = alloc<int>(nelr*NNB);
upload<int>(elements_surrounding_elements, h_elements_surrounding_elements, nelr*NNB);
normals = alloc<float>(nelr*NDIM*NNB);
upload<float>(normals, h_normals, nelr*NDIM*NNB);
delete[] h_areas;
delete[] h_elements_surrounding_elements;
delete[] h_normals;
}
// Create arrays and set initial conditions
float* variables = alloc<float>(nelr*NVAR);
initialize_variables(nelr, variables);
float* old_variables = alloc<float>(nelr*NVAR);
float* fluxes = alloc<float>(nelr*NVAR);
float* step_factors = alloc<float>(nelr);
// make sure all memory is really allocated before we start timing
initialize_variables(nelr, old_variables);
initialize_variables(nelr, fluxes);
cudaMemset( (void*) step_factors, 0, sizeof(float)*nelr );
// make sure CUDA isn't still doing something before we start timing
cudaDeviceSynchronize(); // cudaThreadSynchronize is deprecated
// these need to be computed the first time in order to compute time step
// unsigned int timer = 0;
// CUT_SAFE_CALL( cutCreateTimer( &timer));
// CUT_SAFE_CALL( cutStartTimer( timer));
// Begin iterations
for(int i = 0; i < iterations; i++)
{
copy<float>(old_variables, variables, nelr*NVAR);
// for the first iteration we compute the time step
compute_step_factor(nelr, variables, areas, step_factors);
CHECK_CUDA_ERROR();
for(int j = 0; j < RK; j++)
{
compute_flux(nelr, elements_surrounding_elements, normals, variables, fluxes);
CHECK_CUDA_ERROR();
time_step(j, nelr, old_variables, variables, step_factors, fluxes);
CHECK_CUDA_ERROR();
}
}
cudaDeviceSynchronize(); // cudaThreadSynchronize is deprecated
// CUT_SAFE_CALL( cutStopTimer(timer) );
if(op.getOptionBool("verbose")) {
dump(variables, nel, nelr);
}
dealloc<float>(areas);
dealloc<int>(elements_surrounding_elements);
dealloc<float>(normals);
dealloc<float>(variables);
dealloc<float>(old_variables);
dealloc<float>(fluxes);
dealloc<float>(step_factors);
char atts[1024];
sprintf(atts, "numelements:%d", nel);
resultDB.AddResult("cfd_kernel_time", atts, "sec", kernelTime);
resultDB.AddResult("cfd_transfer_time", atts, "sec", transferTime);
resultDB.AddResult("cfd_parity", atts, "N", transferTime / kernelTime);
resultDB.AddOverall("Time", "sec", kernelTime+transferTime);
}
|
33fd486cdfdafcfb82b21f214fc030f758c5996b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "funcs_cuda.cuh"
// Number of threads in each thread block
const int blockSize = 128;
void initializeCUDA(int argc, char **argv, int &devID)
{
findCudaDevice(argc, (const char **)argv);
// // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
// hipError_t error;
// devID = 0;
//
// if (checkCmdLineFlag(argc, (const char **)argv, "device"))
// {
// devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
// error = hipSetDevice(devID);
//
// if (error != hipSuccess)
// {
// printf("hipSetDevice returned error code %d, line(%d)\n", error, __LINE__);
// exit(EXIT_FAILURE);
// }
// }
//
// // get number of SMs on this GPU
// error = hipGetDevice(&devID);
//
// if (error != hipSuccess)
// {
// printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__);
// exit(EXIT_FAILURE);
// }
//
// hipDeviceProp_t deviceProp;
//
// error = hipGetDeviceProperties(&deviceProp, devID);
//
// if (error != hipSuccess)
// {
// printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
// exit(EXIT_FAILURE);
// }
//
// printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
__global__ void VectorAdd_Kernel(const double *a, const double *b, double *c, const int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n) {
c[id] = a[id] + b[id];
}
}
void VectorAdd_GPU(const double *h_a, const double *h_b, double *h_c, const int n) {
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Device input vectors
double *dd_a;
double *dd_b;
//Device output vector
double *dd_c;
// Allocate memory for each vector on GPU
hipMalloc(&dd_a, bytes);
hipMalloc(&dd_b, bytes);
hipMalloc(&dd_c, bytes);
// Copy host vectors to device
hipMemcpy( dd_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( dd_b, h_b, bytes, hipMemcpyHostToDevice);
// Number of threads in each thread block
// int blockSize = 10000;
// Number of thread blocks in grid
int gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
hipLaunchKernelGGL(( VectorAdd_Kernel), dim3(gridSize), dim3(blockSize), 0, 0, dd_a, dd_b, dd_c, n);
hipMemcpy( h_c, dd_c, bytes, hipMemcpyDeviceToHost );
// Release device memory
hipFree(dd_a);
hipFree(dd_b);
hipFree(dd_c);
}
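// Hedged usage sketch (illustrative only; assumes <vector> and is not part of
// this file):
//
//   const int n = 1 << 20;
//   std::vector<double> a(n, 1.0), b(n, 2.0), c(n);
//   VectorAdd_GPU(a.data(), b.data(), c.data(), n); // afterwards c[i] == 3.0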
void VectorsClass::VectorAdd_GPU_InClass(double *h_a, double *h_b, double *h_c, int n) {
// Copy host vectors to device
hipMemcpy( this->d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( this->d_b, h_b, bytes, hipMemcpyHostToDevice);
// Number of threads in each thread block
// int blockSize = 10000;
// Number of thread blocks in grid
int gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
hipLaunchKernelGGL(( VectorAdd_Kernel), dim3(gridSize), dim3(blockSize), 0, 0, this->d_a, this->d_b, this->d_c, n);
hipDeviceSynchronize();
hipMemcpy( h_c, this->d_c, bytes, hipMemcpyDeviceToHost );
// device memory is owned by the class and released elsewhere (e.g., in its destructor)
}
void VectorsClass::VectorAdd_GPU_InClass(double *h_c, int n) {
// Number of thread blocks in grid
int gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
hipLaunchKernelGGL(( VectorAdd_Kernel), dim3(gridSize), dim3(blockSize), 0, 0, this->d_a, this->d_b, this->d_c, n);
std::cout << "bytes is " << this->bytes << std::endl;
checkCudaErrors(hipMemcpy( h_c, this->d_c, this->bytes, hipMemcpyDeviceToHost ));
// for (int i {0}; i < 10; ++i) {
// std::cout << h_c[i] << std::endl;
// }
// device memory is owned by the class and released elsewhere (e.g., in its destructor)
} | 33fd486cdfdafcfb82b21f214fc030f758c5996b.cu | #include <iostream>
#include "funcs_cuda.cuh"
// Number of threads in each thread block
const int blockSize = 128;
void initializeCUDA(int argc, char **argv, int &devID)
{
findCudaDevice(argc, (const char **)argv);
// // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
// cudaError_t error;
// devID = 0;
//
// if (checkCmdLineFlag(argc, (const char **)argv, "device"))
// {
// devID = getCmdLineArgumentInt(argc, (const char **)argv, "device");
// error = cudaSetDevice(devID);
//
// if (error != cudaSuccess)
// {
// printf("cudaSetDevice returned error code %d, line(%d)\n", error, __LINE__);
// exit(EXIT_FAILURE);
// }
// }
//
// // get number of SMs on this GPU
// error = cudaGetDevice(&devID);
//
// if (error != cudaSuccess)
// {
// printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__);
// exit(EXIT_FAILURE);
// }
//
// cudaDeviceProp deviceProp;
//
// error = cudaGetDeviceProperties(&deviceProp, devID);
//
// if (error != cudaSuccess)
// {
// printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
// exit(EXIT_FAILURE);
// }
//
// printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor);
}
__global__ void VectorAdd_Kernel(const double *a, const double *b, double *c, const int n) {
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n) {
c[id] = a[id] + b[id];
}
}
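// Launch-shape note: the wrappers below size the grid as ceil(n/blockSize),
// which can launch up to blockSize-1 surplus threads; the `id < n` guard
// above turns those into no-ops.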
void VectorAdd_GPU(const double *h_a, const double *h_b, double *h_c, const int n) {
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Device input vectors
double *dd_a;
double *dd_b;
//Device output vector
double *dd_c;
// Allocate memory for each vector on GPU
cudaMalloc(&dd_a, bytes);
cudaMalloc(&dd_b, bytes);
cudaMalloc(&dd_c, bytes);
// Copy host vectors to device
cudaMemcpy( dd_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( dd_b, h_b, bytes, cudaMemcpyHostToDevice);
// Number of threads in each thread block
// int blockSize = 10000;
// Number of thread blocks in grid
int gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
VectorAdd_Kernel<<<gridSize, blockSize>>>(dd_a, dd_b, dd_c, n);
cudaMemcpy( h_c, dd_c, bytes, cudaMemcpyDeviceToHost );
// Release device memory
cudaFree(dd_a);
cudaFree(dd_b);
cudaFree(dd_c);
}
void VectorsClass::VectorAdd_GPU_InClass(double *h_a, double *h_b, double *h_c, int n) {
// Copy host vectors to device
cudaMemcpy( this->d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( this->d_b, h_b, bytes, cudaMemcpyHostToDevice);
// Number of threads in each thread block
// int blockSize = 10000;
// Number of thread blocks in grid
int gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
VectorAdd_Kernel<<<gridSize, blockSize>>>(this->d_a, this->d_b, this->d_c, n);
cudaDeviceSynchronize(); // cudaThreadSynchronize is deprecated
cudaMemcpy( h_c, this->d_c, bytes, cudaMemcpyDeviceToHost );
// device memory is owned by the class and released elsewhere (e.g., in its destructor)
}
void VectorsClass::VectorAdd_GPU_InClass(double *h_c, int n) {
// Number of thread blocks in grid
int gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
VectorAdd_Kernel<<<gridSize, blockSize>>>(this->d_a, this->d_b, this->d_c, n);
std::cout << "bytes is " << this->bytes << std::endl;
checkCudaErrors(cudaMemcpy( h_c, this->d_c, this->bytes, cudaMemcpyDeviceToHost ));
// for (int i {0}; i < 10; ++i) {
// std::cout << h_c[i] << std::endl;
// }
// device memory is owned by the class and released elsewhere (e.g., in its destructor)
} |
fc735562331dbfcc04f2c61bbd5d6cb455f07b1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
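// Backward pass for NLL loss y[t] = -x[t][target[t]], assuming x is laid out
// row-major with row stride x_stride. The only nonzero input gradient is
// d y[t] / d x[t][target[t]] = -1, so each thread scatters -yGrad[t] into
// that single slot of xGrad.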
__global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
xGrad[offset] += -1 * yGrad[tid];
} | fc735562331dbfcc04f2c61bbd5d6cb455f07b1e.cu | #include "includes.h"
__global__ void nllLoss_grad(int x_stride, float *yGrad, int* target, float* xGrad) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int offset = tid * x_stride + target[tid];
xGrad[offset] += -1 * yGrad[tid];
} |
3c5988693c5ace0ff0eb10c20c02758bb07edd20.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cupti.h>
#include <stdio.h>
#include <assert.h>
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
exit(-1); \
} \
} while (0)
#define DRIVER_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
hipError_t _status = apiFuncCall; \
if (_status != hipSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status)); \
exit(-1); \
} \
} while (0)
typedef char cuptiAttrString_t[256];
typedef char cuptiAttrLargeString_t[1024];
/*
* CUPTI_EVENT_CATEGORY_INSTRUCTION = 0
An instruction related event.
CUPTI_EVENT_CATEGORY_MEMORY = 1
A memory related event.
CUPTI_EVENT_CATEGORY_CACHE = 2
A cache related event.
CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER = 3
A profile-trigger event.
CUPTI_EVENT_CATEGORY_SYSTEM = 4
A system event.
*/
static const char* g_categories[5] = {
"CUPTI_EVENT_CATEGORY_INSTRUCTION",
"CUPTI_EVENT_CATEGORY_MEMORY",
"CUPTI_EVENT_CATEGORY_CACHE",
"CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER",
"CUPTI_EVENT_CATEGORY_SYSTEM"
};
static void cuptiInfo(hipDevice_t device) {
uint32_t numDomains = 0;
CUPTI_CALL(cuptiDeviceGetNumEventDomains(device, &numDomains));
CUpti_EventDomainID* domainIDs = (CUpti_EventDomainID*) malloc(sizeof(CUpti_EventDomainID) * numDomains);
assert(domainIDs != NULL);
size_t szNumDomains = (size_t)(numDomains) * sizeof(CUpti_EventDomainID);
CUPTI_CALL(cuptiDeviceEnumEventDomains(device, &szNumDomains, domainIDs));
szNumDomains /= sizeof(CUpti_EventDomainID);
printf("Domain Count: %lu\n", szNumDomains);
for (size_t i = 0; i < szNumDomains; ++i) {
CUpti_EventDomainID id = domainIDs[i];
uint32_t numEvents = 0;
CUPTI_CALL(cuptiEventDomainGetNumEvents(id, &numEvents));
CUpti_EventID* eventIDs = (CUpti_EventID*) malloc(sizeof(CUpti_EventID) * numEvents);
assert(eventIDs != NULL);
size_t szNumEvents = ((size_t)numEvents) * sizeof(CUpti_EventID);
CUPTI_CALL(cuptiEventDomainEnumEvents(id, &szNumEvents, eventIDs));
szNumEvents /= sizeof(CUpti_EventID);
cuptiAttrString_t dname = {0};
size_t dname_len = sizeof(dname) - 1;
CUPTI_CALL(cuptiEventDomainGetAttribute(id, CUPTI_EVENT_DOMAIN_ATTR_NAME, &dname_len, dname));
printf("ID: %u. Domain Name: %s. Count: %lu\n", id, dname, szNumEvents);
for (size_t j = 0; j < szNumEvents; ++j) {
CUpti_EventID eid = eventIDs[j];
cuptiAttrString_t name = {0};
size_t len = sizeof(name) - 1;
CUPTI_CALL(cuptiEventGetAttribute(eid, CUPTI_EVENT_ATTR_NAME, &len, name));
cuptiAttrLargeString_t desc = {0};
len = sizeof(desc) - 1;
CUPTI_CALL(cuptiEventGetAttribute(eid, CUPTI_EVENT_ATTR_LONG_DESCRIPTION, &len, desc));
CUpti_EventCategory cat;
len = sizeof(cat);
CUPTI_CALL(cuptiEventGetAttribute(eid, CUPTI_EVENT_ATTR_CATEGORY, &len, &cat));
assert(0 <= (int)cat && (int)cat < 5);
printf("\n\tName: %s\n\t\tID: %i\n\t\tDescription: %s\n\t\tCategory: %s\n\n",
name,
eid,
desc,
g_categories[(int)cat]
);
}
free(eventIDs);
}
free(domainIDs);
}
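// Hedged companion sketch: the reverse lookup, from an event name to its ID,
// uses cuptiEventGetIdFromName from the same event API ("inst_executed" is
// only an example name and may not exist on every device):
//
//   CUpti_EventID eid;
//   CUPTI_CALL(cuptiEventGetIdFromName(device, "inst_executed", &eid));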
int main(int argc, char** argv) {
hipDevice_t device;
char deviceName[128] = {0};
int deviceNum = 0, devCount = 0;
DRIVER_API_CALL(hipInit(0));
RUNTIME_API_CALL(hipGetDeviceCount(&devCount));
for (deviceNum=0; deviceNum<devCount; deviceNum++) {
DRIVER_API_CALL(hipDeviceGet(&device, deviceNum));
DRIVER_API_CALL(hipDeviceGetName(deviceName, 32, device));
printf("Device Name: %s\n", deviceName);
RUNTIME_API_CALL(hipSetDevice(deviceNum));
cuptiInfo(device);
RUNTIME_API_CALL(hipDeviceSynchronize());
RUNTIME_API_CALL(hipDeviceReset());
// CUPTI_CALL(cuptiActivityFlushAll(0));
}
return 0;
}
| 3c5988693c5ace0ff0eb10c20c02758bb07edd20.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <cupti.h>
#include <stdio.h>
#include <assert.h>
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
exit(-1); \
} \
} while (0)
#define DRIVER_API_CALL(apiFuncCall) \
do { \
CUresult _status = apiFuncCall; \
if (_status != CUDA_SUCCESS) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %d.\n", \
__FILE__, __LINE__, #apiFuncCall, _status); \
exit(-1); \
} \
} while (0)
#define RUNTIME_API_CALL(apiFuncCall) \
do { \
cudaError_t _status = apiFuncCall; \
if (_status != cudaSuccess) { \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status)); \
exit(-1); \
} \
} while (0)
typedef char cuptiAttrString_t[256];
typedef char cuptiAttrLargeString_t[1024];
/*
* CUPTI_EVENT_CATEGORY_INSTRUCTION = 0
An instruction related event.
CUPTI_EVENT_CATEGORY_MEMORY = 1
A memory related event.
CUPTI_EVENT_CATEGORY_CACHE = 2
A cache related event.
CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER = 3
A profile-trigger event.
CUPTI_EVENT_CATEGORY_SYSTEM = 4
A system event.
*/
static const char* g_categories[5] = {
"CUPTI_EVENT_CATEGORY_INSTRUCTION",
"CUPTI_EVENT_CATEGORY_MEMORY",
"CUPTI_EVENT_CATEGORY_CACHE",
"CUPTI_EVENT_CATEGORY_PROFILE_TRIGGER",
"CUPTI_EVENT_CATEGORY_SYSTEM"
};
static void cuptiInfo(CUdevice device) {
uint32_t numDomains = 0;
CUPTI_CALL(cuptiDeviceGetNumEventDomains(device, &numDomains));
CUpti_EventDomainID* domainIDs = (CUpti_EventDomainID*) malloc(sizeof(CUpti_EventDomainID) * numDomains);
assert(domainIDs != NULL);
size_t szNumDomains = (size_t)(numDomains) * sizeof(CUpti_EventDomainID);
CUPTI_CALL(cuptiDeviceEnumEventDomains(device, &szNumDomains, domainIDs));
szNumDomains /= sizeof(CUpti_EventDomainID);
printf("Domain Count: %lu\n", szNumDomains);
for (size_t i = 0; i < szNumDomains; ++i) {
CUpti_EventDomainID id = domainIDs[i];
uint32_t numEvents = 0;
CUPTI_CALL(cuptiEventDomainGetNumEvents(id, &numEvents));
CUpti_EventID* eventIDs = (CUpti_EventID*) malloc(sizeof(CUpti_EventID) * numEvents);
assert(eventIDs != NULL);
size_t szNumEvents = ((size_t)numEvents) * sizeof(CUpti_EventID);
CUPTI_CALL(cuptiEventDomainEnumEvents(id, &szNumEvents, eventIDs));
szNumEvents /= sizeof(CUpti_EventID);
cuptiAttrString_t dname = {0};
size_t dname_len = sizeof(dname) - 1;
CUPTI_CALL(cuptiEventDomainGetAttribute(id, CUPTI_EVENT_DOMAIN_ATTR_NAME, &dname_len, dname));
printf("ID: %u. Domain Name: %s. Count: %lu\n", id, dname, szNumEvents);
for (size_t j = 0; j < szNumEvents; ++j) {
CUpti_EventID eid = eventIDs[j];
cuptiAttrString_t name = {0};
size_t len = sizeof(name) - 1;
CUPTI_CALL(cuptiEventGetAttribute(eid, CUPTI_EVENT_ATTR_NAME, &len, name));
cuptiAttrLargeString_t desc = {0};
len = sizeof(desc) - 1;
CUPTI_CALL(cuptiEventGetAttribute(eid, CUPTI_EVENT_ATTR_LONG_DESCRIPTION, &len, desc));
CUpti_EventCategory cat;
len = sizeof(cat);
CUPTI_CALL(cuptiEventGetAttribute(eid, CUPTI_EVENT_ATTR_CATEGORY, &len, &cat));
assert(0 <= (int)cat && (int)cat < 5);
printf("\n\tName: %s\n\t\tID: %i\n\t\tDescription: %s\n\t\tCategory: %s\n\n",
name,
eid,
desc,
g_categories[(int)cat]
);
}
free(eventIDs);
}
free(domainIDs);
}
int main(int argc, char** argv) {
CUdevice device;
char deviceName[128] = {0};
int deviceNum = 0, devCount = 0;
DRIVER_API_CALL(cuInit(0));
RUNTIME_API_CALL(cudaGetDeviceCount(&devCount));
for (deviceNum=0; deviceNum<devCount; deviceNum++) {
DRIVER_API_CALL(cuDeviceGet(&device, deviceNum));
DRIVER_API_CALL(cuDeviceGetName(deviceName, 32, device));
printf("Device Name: %s\n", deviceName);
RUNTIME_API_CALL(cudaSetDevice(deviceNum));
cuptiInfo(device);
RUNTIME_API_CALL(cudaDeviceSynchronize());
RUNTIME_API_CALL(cudaDeviceReset());
// CUPTI_CALL(cuptiActivityFlushAll(0));
}
return 0;
}
|
91d301ed8a0ec3990004edb1958dc38ac901e341.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_60 \
// RUN: -fcuda-is-device -target-feature +ptx60 \
// RUN: -S -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK %s
// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_80 \
// RUN: -fcuda-is-device -target-feature +ptx65 \
// RUN: -S -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK %s
// RUN: %clang_cc1 -triple nvptx-unknown-unknown -target-cpu sm_60 \
// RUN: -fcuda-is-device -S -o /dev/null -x cuda -verify %s
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __shared__ __attribute__((shared))
#define __constant__ __attribute__((constant))
typedef unsigned long long uint64_t;
// We have to keep all builtins that depend on particular target feature in the
// same function, because the codegen will stop after the very first function
// that encounters an error, so -verify will not be able to find errors in
// subsequent functions.
// CHECK-LABEL: nvvm_sync
__device__ void nvvm_sync(unsigned mask, int i, float f, int a, int b,
bool pred, uint64_t i64) {
// CHECK: call void @llvm.nvvm.bar.warp.sync(i32
// expected-error@+1 {{'__nvvm_bar_warp_sync' needs target feature ptx60}}
__nvvm_bar_warp_sync(mask);
// CHECK: call void @llvm.nvvm.barrier.sync(i32
// expected-error@+1 {{'__nvvm_barrier_sync' needs target feature ptx60}}
__nvvm_barrier_sync(mask);
// CHECK: call void @llvm.nvvm.barrier.sync.cnt(i32
// expected-error@+1 {{'__nvvm_barrier_sync_cnt' needs target feature ptx60}}
__nvvm_barrier_sync_cnt(mask, i);
//
// SHFL.SYNC
//
// CHECK: call i32 @llvm.nvvm.shfl.sync.down.i32(i32 {{%[0-9]+}}, i32
// expected-error@+1 {{'__nvvm_shfl_sync_down_i32' needs target feature ptx60}}
__nvvm_shfl_sync_down_i32(mask, i, a, b);
// CHECK: call float @llvm.nvvm.shfl.sync.down.f32(i32 {{%[0-9]+}}, float
// expected-error@+1 {{'__nvvm_shfl_sync_down_f32' needs target feature ptx60}}
__nvvm_shfl_sync_down_f32(mask, f, a, b);
// CHECK: call i32 @llvm.nvvm.shfl.sync.up.i32(i32 {{%[0-9]+}}, i32
// expected-error@+1 {{'__nvvm_shfl_sync_up_i32' needs target feature ptx60}}
__nvvm_shfl_sync_up_i32(mask, i, a, b);
// CHECK: call float @llvm.nvvm.shfl.sync.up.f32(i32 {{%[0-9]+}}, float
// expected-error@+1 {{'__nvvm_shfl_sync_up_f32' needs target feature ptx60}}
__nvvm_shfl_sync_up_f32(mask, f, a, b);
// CHECK: call i32 @llvm.nvvm.shfl.sync.bfly.i32(i32 {{%[0-9]+}}, i32
// expected-error@+1 {{'__nvvm_shfl_sync_bfly_i32' needs target feature ptx60}}
__nvvm_shfl_sync_bfly_i32(mask, i, a, b);
// CHECK: call float @llvm.nvvm.shfl.sync.bfly.f32(i32 {{%[0-9]+}}, float
// expected-error@+1 {{'__nvvm_shfl_sync_bfly_f32' needs target feature ptx60}}
__nvvm_shfl_sync_bfly_f32(mask, f, a, b);
// CHECK: call i32 @llvm.nvvm.shfl.sync.idx.i32(i32 {{%[0-9]+}}, i32
// expected-error@+1 {{'__nvvm_shfl_sync_idx_i32' needs target feature ptx60}}
__nvvm_shfl_sync_idx_i32(mask, i, a, b);
// CHECK: call float @llvm.nvvm.shfl.sync.idx.f32(i32 {{%[0-9]+}}, float
// expected-error@+1 {{'__nvvm_shfl_sync_idx_f32' needs target feature ptx60}}
__nvvm_shfl_sync_idx_f32(mask, f, a, b);
//
// VOTE.SYNC
//
// CHECK: call i1 @llvm.nvvm.vote.all.sync(i32
// expected-error@+1 {{'__nvvm_vote_all_sync' needs target feature ptx60}}
__nvvm_vote_all_sync(mask, pred);
// CHECK: call i1 @llvm.nvvm.vote.any.sync(i32
// expected-error@+1 {{'__nvvm_vote_any_sync' needs target feature ptx60}}
__nvvm_vote_any_sync(mask, pred);
// CHECK: call i1 @llvm.nvvm.vote.uni.sync(i32
// expected-error@+1 {{'__nvvm_vote_uni_sync' needs target feature ptx60}}
__nvvm_vote_uni_sync(mask, pred);
// CHECK: call i32 @llvm.nvvm.vote.ballot.sync(i32
// expected-error@+1 {{'__nvvm_vote_ballot_sync' needs target feature ptx60}}
__nvvm_vote_ballot_sync(mask, pred);
//
// MATCH.{ALL,ANY}.SYNC
//
// CHECK: call i32 @llvm.nvvm.match.any.sync.i32(i32
// expected-error@+1 {{'__nvvm_match_any_sync_i32' needs target feature ptx60}}
__nvvm_match_any_sync_i32(mask, i);
// CHECK: call i64 @llvm.nvvm.match.any.sync.i64(i32
// expected-error@+1 {{'__nvvm_match_any_sync_i64' needs target feature ptx60}}
__nvvm_match_any_sync_i64(mask, i64);
// CHECK: call { i32, i1 } @llvm.nvvm.match.all.sync.i32p(i32
// expected-error@+1 {{'__nvvm_match_all_sync_i32p' needs target feature ptx60}}
__nvvm_match_all_sync_i32p(mask, i, &i);
// CHECK: call { i64, i1 } @llvm.nvvm.match.all.sync.i64p(i32
// expected-error@+1 {{'__nvvm_match_all_sync_i64p' needs target feature ptx60}}
__nvvm_match_all_sync_i64p(mask, i64, &i);
// CHECK: ret void
}
| 91d301ed8a0ec3990004edb1958dc38ac901e341.cu | // RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_60 \
// RUN: -fcuda-is-device -target-feature +ptx60 \
// RUN: -S -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK %s
// RUN: %clang_cc1 -triple nvptx64-unknown-unknown -target-cpu sm_80 \
// RUN: -fcuda-is-device -target-feature +ptx65 \
// RUN: -S -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK %s
// RUN: %clang_cc1 -triple nvptx-unknown-unknown -target-cpu sm_60 \
// RUN: -fcuda-is-device -S -o /dev/null -x cuda -verify %s
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __shared__ __attribute__((shared))
#define __constant__ __attribute__((constant))
typedef unsigned long long uint64_t;
// We have to keep all builtins that depend on particular target feature in the
// same function, because the codegen will stop after the very first function
// that encounters an error, so -verify will not be able to find errors in
// subsequent functions.
// CHECK-LABEL: nvvm_sync
__device__ void nvvm_sync(unsigned mask, int i, float f, int a, int b,
bool pred, uint64_t i64) {
// CHECK: call void @llvm.nvvm.bar.warp.sync(i32
// expected-error@+1 {{'__nvvm_bar_warp_sync' needs target feature ptx60}}
__nvvm_bar_warp_sync(mask);
// CHECK: call void @llvm.nvvm.barrier.sync(i32
// expected-error@+1 {{'__nvvm_barrier_sync' needs target feature ptx60}}
__nvvm_barrier_sync(mask);
// CHECK: call void @llvm.nvvm.barrier.sync.cnt(i32
// expected-error@+1 {{'__nvvm_barrier_sync_cnt' needs target feature ptx60}}
__nvvm_barrier_sync_cnt(mask, i);
//
// SHFL.SYNC
//
// CHECK: call i32 @llvm.nvvm.shfl.sync.down.i32(i32 {{%[0-9]+}}, i32
// expected-error@+1 {{'__nvvm_shfl_sync_down_i32' needs target feature ptx60}}
__nvvm_shfl_sync_down_i32(mask, i, a, b);
// CHECK: call float @llvm.nvvm.shfl.sync.down.f32(i32 {{%[0-9]+}}, float
// expected-error@+1 {{'__nvvm_shfl_sync_down_f32' needs target feature ptx60}}
__nvvm_shfl_sync_down_f32(mask, f, a, b);
// CHECK: call i32 @llvm.nvvm.shfl.sync.up.i32(i32 {{%[0-9]+}}, i32
// expected-error@+1 {{'__nvvm_shfl_sync_up_i32' needs target feature ptx60}}
__nvvm_shfl_sync_up_i32(mask, i, a, b);
// CHECK: call float @llvm.nvvm.shfl.sync.up.f32(i32 {{%[0-9]+}}, float
// expected-error@+1 {{'__nvvm_shfl_sync_up_f32' needs target feature ptx60}}
__nvvm_shfl_sync_up_f32(mask, f, a, b);
// CHECK: call i32 @llvm.nvvm.shfl.sync.bfly.i32(i32 {{%[0-9]+}}, i32
// expected-error@+1 {{'__nvvm_shfl_sync_bfly_i32' needs target feature ptx60}}
__nvvm_shfl_sync_bfly_i32(mask, i, a, b);
// CHECK: call float @llvm.nvvm.shfl.sync.bfly.f32(i32 {{%[0-9]+}}, float
// expected-error@+1 {{'__nvvm_shfl_sync_bfly_f32' needs target feature ptx60}}
__nvvm_shfl_sync_bfly_f32(mask, f, a, b);
// CHECK: call i32 @llvm.nvvm.shfl.sync.idx.i32(i32 {{%[0-9]+}}, i32
// expected-error@+1 {{'__nvvm_shfl_sync_idx_i32' needs target feature ptx60}}
__nvvm_shfl_sync_idx_i32(mask, i, a, b);
// CHECK: call float @llvm.nvvm.shfl.sync.idx.f32(i32 {{%[0-9]+}}, float
// expected-error@+1 {{'__nvvm_shfl_sync_idx_f32' needs target feature ptx60}}
__nvvm_shfl_sync_idx_f32(mask, f, a, b);
//
// VOTE.SYNC
//
// CHECK: call i1 @llvm.nvvm.vote.all.sync(i32
// expected-error@+1 {{'__nvvm_vote_all_sync' needs target feature ptx60}}
__nvvm_vote_all_sync(mask, pred);
// CHECK: call i1 @llvm.nvvm.vote.any.sync(i32
// expected-error@+1 {{'__nvvm_vote_any_sync' needs target feature ptx60}}
__nvvm_vote_any_sync(mask, pred);
// CHECK: call i1 @llvm.nvvm.vote.uni.sync(i32
// expected-error@+1 {{'__nvvm_vote_uni_sync' needs target feature ptx60}}
__nvvm_vote_uni_sync(mask, pred);
// CHECK: call i32 @llvm.nvvm.vote.ballot.sync(i32
// expected-error@+1 {{'__nvvm_vote_ballot_sync' needs target feature ptx60}}
__nvvm_vote_ballot_sync(mask, pred);
//
// MATCH.{ALL,ANY}.SYNC
//
// CHECK: call i32 @llvm.nvvm.match.any.sync.i32(i32
// expected-error@+1 {{'__nvvm_match_any_sync_i32' needs target feature ptx60}}
__nvvm_match_any_sync_i32(mask, i);
// CHECK: call i64 @llvm.nvvm.match.any.sync.i64(i32
// expected-error@+1 {{'__nvvm_match_any_sync_i64' needs target feature ptx60}}
__nvvm_match_any_sync_i64(mask, i64);
// CHECK: call { i32, i1 } @llvm.nvvm.match.all.sync.i32p(i32
// expected-error@+1 {{'__nvvm_match_all_sync_i32p' needs target feature ptx60}}
__nvvm_match_all_sync_i32p(mask, i, &i);
// CHECK: call { i64, i1 } @llvm.nvvm.match.all.sync.i64p(i32
// expected-error@+1 {{'__nvvm_match_all_sync_i64p' needs target feature ptx60}}
__nvvm_match_all_sync_i64p(mask, i64, &i);
// CHECK: ret void
}
|
6314bf5b0f3fa3e9d396d12b637f48123228ed29.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void save_soln_gpu( const double *q, double *qold) {
for (int n = 0; n < 4; n++)
qold[n] = q[n];
}
// CUDA kernel function
__global__ void op_cuda_save_soln(
const double *__restrict arg0,
double *arg1,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
save_soln_gpu(arg0+n*4,
arg1+n*4);
}
}
//host stub function
void op_par_loop_save_soln(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: save_soln");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
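// Fixed launch grid: the kernel iterates with a grid-stride loop, so 200 blocks cover any set size.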
hipLaunchKernelGGL(( op_cuda_save_soln), dim3(nblocks),dim3(nthread), 0, 0,
(double *) arg0.data_d,
(double *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
OP_kernels[0].transfer += (float)set->size * arg0.size;
OP_kernels[0].transfer += (float)set->size * arg1.size * 2.0f;
}
| 6314bf5b0f3fa3e9d396d12b637f48123228ed29.cu | //
// auto-generated by op2.py
//
//user function
__device__ void save_soln_gpu( const double *q, double *qold) {
for (int n = 0; n < 4; n++)
qold[n] = q[n];
}
// CUDA kernel function
__global__ void op_cuda_save_soln(
const double *__restrict arg0,
double *arg1,
int set_size ) {
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
save_soln_gpu(arg0+n*4,
arg1+n*4);
}
}
//host stub function
void op_par_loop_save_soln(char const *name, op_set set,
op_arg arg0,
op_arg arg1){
int nargs = 2;
op_arg args[2];
args[0] = arg0;
args[1] = arg1;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(0);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[0].name = name;
OP_kernels[0].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: save_soln");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_0
int nthread = OP_BLOCK_SIZE_0;
#else
int nthread = OP_block_size;
#endif
int nblocks = 200;
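// Fixed launch grid: the kernel iterates with a grid-stride loop, so 200 blocks cover any set size.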
op_cuda_save_soln<<<nblocks,nthread>>>(
(double *) arg0.data_d,
(double *) arg1.data_d,
set->size );
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[0].time += wall_t2 - wall_t1;
OP_kernels[0].transfer += (float)set->size * arg0.size;
OP_kernels[0].transfer += (float)set->size * arg1.size * 2.0f;
}
|
cb24f4daa128982d383990013a5ababfd08e1d59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
const float PI = 3.14159265359;
const float HALFPI = 0.5*PI;
texture<unsigned char, hipTextureType3D, hipReadModeElementType> tcExpData;
texture<float, hipTextureType2D, hipReadModeElementType> tfG; // texture to store scattering vectors;
typedef struct {
int iNPixelJ, iNPixelK;
float fPixelJ, fPixelK;
float afCoordOrigin[3];
float afNorm[3];
float afJVector[3];
float afKVector[3];
float fNRot, fAngleStart,fAngleEnd;
} DetInfo;
__global__ void mat_to_euler_ZXZ(float* afMatIn, float* afEulerOut, int iNAngle){
/*
* Transform an active rotation matrix to Euler angles in ZXZ convention
* (initially suspected incorrect; now appears to give the right result).
* afMatIn: iNAngle * 9
* afEulerOut: iNAngle * 3
* TEST PASSED
*/
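// Layout note: matrices are stored row-major with a stride of 9 per entry,
// so element (r, c) of matrix i lives at afMatIn[i*9 + r*3 + c].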
float threshold = 0.9999999;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<iNAngle){
if(afMatIn[i * 9 + 8] > threshold){
afEulerOut[i * 3 + 0] = 0;
afEulerOut[i * 3 + 1] = 0;
afEulerOut[i * 3 + 2] = atan2(afMatIn[i*9 + 3], afMatIn[i*9 + 0]); // atan2(m[1, 0], m[0, 0])
}
else if(afMatIn[i * 9 + 8] < - threshold){
afEulerOut[i * 3 + 0] = 0;
afEulerOut[i * 3 + 1] = PI;
afEulerOut[i * 3 + 2] = atan2(afMatIn[i*9 + 1], afMatIn[i*9 + 0]); // atan2(m[0, 1], m[0, 0])
}
else{
afEulerOut[i * 3 + 0] = atan2(afMatIn[i*9 + 2], - afMatIn[i*9 + 5]); // atan2(m[0, 2], -m[1, 2])
afEulerOut[i * 3 + 1] = atan2( sqrt(afMatIn[i*9 + 6] * afMatIn[i*9 + 6]
+ afMatIn[i*9 + 7] * afMatIn[i*9 + 7]),
afMatIn[i*9 + 8]); // atan2(np.sqrt(m[2, 0] ** 2 + m[2, 1] ** 2), m[2, 2])
afEulerOut[i * 3 + 2] = atan2( afMatIn[i*9 + 6], afMatIn[i*9 + 7]); // atan2(m[2, 0], m[2, 1])
if(afEulerOut[i * 3 + 0] < 0){
afEulerOut[i * 3 + 0] += 2 * PI;
}
if(afEulerOut[i * 3 + 1] < 0){
afEulerOut[i * 3 + 1] += 2 * PI;
}
if(afEulerOut[i * 3 + 2] < 0){
afEulerOut[i * 3 + 2] += 2 * PI;
}
}
}
} | cb24f4daa128982d383990013a5ababfd08e1d59.cu | #include "includes.h"
const float PI = 3.14159265359;
const float HALFPI = 0.5*PI;
texture<unsigned char, cudaTextureType3D, cudaReadModeElementType> tcExpData;
texture<float, cudaTextureType2D, cudaReadModeElementType> tfG; // texture to store scattering vectors;
typedef struct {
int iNPixelJ, iNPixelK;
float fPixelJ, fPixelK;
float afCoordOrigin[3];
float afNorm[3];
float afJVector[3];
float afKVector[3];
float fNRot, fAngleStart,fAngleEnd;
} DetInfo;
__global__ void mat_to_euler_ZXZ(float* afMatIn, float* afEulerOut, int iNAngle){
/*
* Transform an active rotation matrix to Euler angles in ZXZ convention
* (initially suspected incorrect; now appears to give the right result).
* afMatIn: iNAngle * 9
* afEulerOut: iNAngle * 3
* TEST PASSED
*/
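// Layout note: matrices are stored row-major with a stride of 9 per entry,
// so element (r, c) of matrix i lives at afMatIn[i*9 + r*3 + c].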
float threshold = 0.9999999;
int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i<iNAngle){
if(afMatIn[i * 9 + 8] > threshold){
afEulerOut[i * 3 + 0] = 0;
afEulerOut[i * 3 + 1] = 0;
afEulerOut[i * 3 + 2] = atan2(afMatIn[i*9 + 3], afMatIn[i*9 + 0]); // atan2(m[1, 0], m[0, 0])
}
else if(afMatIn[i * 9 + 8] < - threshold){
afEulerOut[i * 3 + 0] = 0;
afEulerOut[i * 3 + 1] = PI;
afEulerOut[i * 3 + 2] = atan2(afMatIn[i*9 + 1], afMatIn[i*9 + 0]); // atan2(m[0, 1], m[0, 0])
}
else{
afEulerOut[i * 3 + 0] = atan2(afMatIn[i*9 + 2], - afMatIn[i*9 + 5]); // atan2(m[0, 2], -m[1, 2])
afEulerOut[i * 3 + 1] = atan2( sqrt(afMatIn[i*9 + 6] * afMatIn[i*9 + 6]
+ afMatIn[i*9 + 7] * afMatIn[i*9 + 7]),
afMatIn[i*9 + 8]); // atan2(np.sqrt(m[2, 0] ** 2 + m[2, 1] ** 2), m[2, 2])
afEulerOut[i * 3 + 2] = atan2( afMatIn[i*9 + 6], afMatIn[i*9 + 7]); // atan2(m[2, 0], m[2, 1])
if(afEulerOut[i * 3 + 0] < 0){
afEulerOut[i * 3 + 0] += 2 * PI;
}
if(afEulerOut[i * 3 + 1] < 0){
afEulerOut[i * 3 + 1] += 2 * PI;
}
if(afEulerOut[i * 3 + 2] < 0){
afEulerOut[i * 3 + 2] += 2 * PI;
}
}
}
} |
5caeb66a3be69955a178a79083d731e9f0664f32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: jglaser
#include "ParticleData.cuh"
#include "kernels/scan.cuh"
/*! \file ParticleGroup.cu
\brief Contains GPU kernel code used by ParticleGroup
*/
//! GPU kernel to translate between global and local membership lookup table
__global__ void gpu_rebuild_index_list_kernel(unsigned int N,
unsigned int *d_tag,
unsigned char *d_is_member_tag,
unsigned char *d_is_member)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
unsigned int tag = d_tag[idx];
d_is_member[idx] = d_is_member_tag[tag];
}
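//! GPU kernel to scatter the indices of member particles into a compact list,
//! using the exclusive-scan result d_scan as write offsets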
__global__ void gpu_scatter_member_indices(unsigned int N,
const unsigned int *d_scan,
const unsigned char *d_is_member,
unsigned *d_member_idx)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx >= N) return;
if (d_is_member[idx])
d_member_idx[d_scan[idx]] = idx;
}
//! GPU method for rebuilding the index list of a ParticleGroup
/*! \param N number of local particles
\param d_is_member_tag Global lookup table for tag -> group membership
\param d_is_member Array of membership flags
\param d_member_idx Array of member indices
\param d_tag Array of tags
\param num_local_members Number of members on the local processor (return value)
*/
hipError_t gpu_rebuild_index_list(unsigned int N,
unsigned char *d_is_member_tag,
unsigned char *d_is_member,
unsigned int *d_member_idx,
unsigned int *d_tag,
unsigned int &num_local_members,
unsigned int *d_tmp,
mgpu::ContextPtr mgpu_context)
{
assert(d_is_member);
assert(d_is_member_tag);
assert(d_member_idx);
assert(d_tag);
unsigned int block_size = 512;
unsigned int n_blocks = N/block_size + 1;
hipLaunchKernelGGL(( gpu_rebuild_index_list_kernel), dim3(n_blocks),dim3(block_size), 0, 0, N,
d_tag,
d_is_member_tag,
d_is_member);
// compute member_idx offsets
mgpu::Scan<mgpu::MgpuScanTypeExc>(d_is_member, N, (unsigned int) 0, mgpu::plus<unsigned int>(),
(unsigned int *) NULL, &num_local_members, d_tmp, *mgpu_context);
// fill member_idx array
hipLaunchKernelGGL(( gpu_scatter_member_indices), dim3(n_blocks), dim3(block_size), 0, 0, N, d_tmp, d_is_member, d_member_idx);
return hipSuccess;
}
| 5caeb66a3be69955a178a79083d731e9f0664f32.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2008-2011 Ames Laboratory
Iowa State University and The Regents of the University of Michigan All rights
reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: jglaser
#include "ParticleData.cuh"
#include "kernels/scan.cuh"
/*! \file ParticleGroup.cu
\brief Contains GPU kernel code used by ParticleGroup
*/
//! GPU kernel to translate between global and local membership lookup table
__global__ void gpu_rebuild_index_list_kernel(unsigned int N,
unsigned int *d_tag,
unsigned char *d_is_member_tag,
unsigned char *d_is_member)
{
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= N) return;
unsigned int tag = d_tag[idx];
d_is_member[idx] = d_is_member_tag[tag];
}
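//! GPU kernel to scatter the indices of member particles into a compact list,
//! using the exclusive-scan result d_scan as write offsets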
__global__ void gpu_scatter_member_indices(unsigned int N,
const unsigned int *d_scan,
const unsigned char *d_is_member,
unsigned *d_member_idx)
{
unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx >= N) return;
if (d_is_member[idx])
d_member_idx[d_scan[idx]] = idx;
}
//! GPU method for rebuilding the index list of a ParticleGroup
/*! \param N number of local particles
\param d_is_member_tag Global lookup table for tag -> group membership
\param d_is_member Array of membership flags
\param d_member_idx Array of member indices
\param d_tag Array of tags
\param num_local_members Number of members on the local processor (return value)
*/
cudaError_t gpu_rebuild_index_list(unsigned int N,
unsigned char *d_is_member_tag,
unsigned char *d_is_member,
unsigned int *d_member_idx,
unsigned int *d_tag,
unsigned int &num_local_members,
unsigned int *d_tmp,
mgpu::ContextPtr mgpu_context)
{
assert(d_is_member);
assert(d_is_member_tag);
assert(d_member_idx);
assert(d_tag);
unsigned int block_size = 512;
unsigned int n_blocks = N/block_size + 1;
gpu_rebuild_index_list_kernel<<<n_blocks,block_size>>>(N,
d_tag,
d_is_member_tag,
d_is_member);
// compute member_idx offsets
mgpu::Scan<mgpu::MgpuScanTypeExc>(d_is_member, N, (unsigned int) 0, mgpu::plus<unsigned int>(),
(unsigned int *) NULL, &num_local_members, d_tmp, *mgpu_context);
// fill member_idx array
gpu_scatter_member_indices<<<n_blocks, block_size>>>(N, d_tmp, d_is_member, d_member_idx);
return cudaSuccess;
}
|
c8a88e1a101d446664a4e329ee06e5cdd9b1b40f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ndt_gpu/VoxelGrid.h"
#include "ndt_gpu/debug.h"
#include "ndt_gpu/common.h"
#include <math.h>
#include <limits>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/fill.h>
#include <inttypes.h>
#include <vector>
#include <cmath>
#include <stdio.h>
#include <sys/time.h>
#include "ndt_gpu/SymmetricEigenSolver.h"
namespace gpu {
GVoxelGrid::GVoxelGrid() :
x_(NULL),
y_(NULL),
z_(NULL),
points_num_(0),
centroid_(NULL),
covariance_(NULL),
inverse_covariance_(NULL),
points_per_voxel_(NULL),
voxel_num_(0),
max_x_(FLT_MAX),
max_y_(FLT_MAX),
max_z_(FLT_MAX),
min_x_(FLT_MIN),
min_y_(FLT_MIN),
min_z_(FLT_MIN),
voxel_x_(0),
voxel_y_(0),
voxel_z_(0),
max_b_x_(0),
max_b_y_(0),
max_b_z_(0),
min_b_x_(0),
min_b_y_(0),
min_b_z_(0),
vgrid_x_(0),
vgrid_y_(0),
vgrid_z_(0),
min_points_per_voxel_(6),
starting_point_ids_(NULL),
point_ids_(NULL),
is_copied_(false) {
};
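// Copy construction is shallow: device buffers are shared with the source, and
// is_copied_ keeps the destructor from freeing them a second time.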
GVoxelGrid::GVoxelGrid(const GVoxelGrid &other) {
x_ = other.x_;
y_ = other.y_;
z_ = other.z_;
points_num_ = other.points_num_;
centroid_ = other.centroid_;
covariance_ = other.covariance_;
inverse_covariance_ = other.inverse_covariance_;
points_per_voxel_ = other.points_per_voxel_;
voxel_num_ = other.voxel_num_;
max_x_ = other.max_x_;
max_y_ = other.max_y_;
max_z_ = other.max_z_;
min_x_ = other.min_x_;
min_y_ = other.min_y_;
min_z_ = other.min_z_;
voxel_x_ = other.voxel_x_;
voxel_y_ = other.voxel_y_;
voxel_z_ = other.voxel_z_;
max_b_x_ = other.max_b_x_;
max_b_y_ = other.max_b_y_;
max_b_z_ = other.max_b_z_;
min_b_x_ = other.min_b_x_;
min_b_y_ = other.min_b_y_;
min_b_z_ = other.min_b_z_;
vgrid_x_ = other.vgrid_x_;
vgrid_y_ = other.vgrid_y_;
vgrid_z_ = other.vgrid_z_;
min_points_per_voxel_ = other.min_points_per_voxel_;
starting_point_ids_ = other.starting_point_ids_;
point_ids_ = other.point_ids_;
is_copied_ = true;
}
GVoxelGrid::~GVoxelGrid() {
if (!is_copied_) {
for (unsigned int i = 1; i < octree_centroids_.size(); i++) {
if (octree_centroids_[i] != NULL) {
checkCudaErrors(hipFree(octree_centroids_[i]));
octree_centroids_[i] = NULL;
}
if (octree_points_per_node_[i] != NULL) {
checkCudaErrors(hipFree(octree_points_per_node_[i]));
octree_points_per_node_[i] = NULL;
}
}
octree_centroids_.clear();
octree_points_per_node_.clear();
octree_grid_size_.clear();
if (starting_point_ids_ != NULL) {
checkCudaErrors(hipFree(starting_point_ids_));
starting_point_ids_ = NULL;
}
if (point_ids_ != NULL) {
checkCudaErrors(hipFree(point_ids_));
point_ids_ = NULL;
}
if (centroid_ != NULL) {
checkCudaErrors(hipFree(centroid_));
centroid_ = NULL;
}
if (covariance_ != NULL) {
checkCudaErrors(hipFree(covariance_));
covariance_ = NULL;
}
if (inverse_covariance_ != NULL) {
checkCudaErrors(hipFree(inverse_covariance_));
inverse_covariance_ = NULL;
}
if (points_per_voxel_ != NULL) {
checkCudaErrors(hipFree(points_per_voxel_));
points_per_voxel_ = NULL;
}
}
}
void GVoxelGrid::initialize() {
if (centroid_ != NULL) {
checkCudaErrors(hipFree(centroid_));
centroid_ = NULL;
}
if (covariance_ != NULL) {
checkCudaErrors(hipFree(covariance_));
covariance_ = NULL;
}
if (inverse_covariance_ != NULL) {
checkCudaErrors(hipFree(inverse_covariance_));
inverse_covariance_ = NULL;
}
if (points_per_voxel_ != NULL) {
checkCudaErrors(hipFree(points_per_voxel_));
points_per_voxel_ = NULL;
}
checkCudaErrors(hipMalloc(&centroid_, sizeof(double) * 3 * voxel_num_));
checkCudaErrors(hipMalloc(&covariance_, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(hipMalloc(&inverse_covariance_, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(hipMalloc(&points_per_voxel_, sizeof(int) * voxel_num_));
checkCudaErrors(hipMemset(inverse_covariance_, 0, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(hipMemset(points_per_voxel_, 0, sizeof(int) * voxel_num_));
checkCudaErrors(hipDeviceSynchronize());
}
int GVoxelGrid::getVoxelNum() const {
return voxel_num_;
}
float GVoxelGrid::getMaxX() const {
return max_x_;
}
float GVoxelGrid::getMaxY() const {
return max_y_;
}
float GVoxelGrid::getMaxZ() const {
return max_z_;
}
float GVoxelGrid::getMinX() const {
return min_x_;
}
float GVoxelGrid::getMinY() const {
return min_y_;
}
float GVoxelGrid::getMinZ() const {
return min_z_;
}
float GVoxelGrid::getVoxelX() const {
return voxel_x_;
}
float GVoxelGrid::getVoxelY() const {
return voxel_y_;
}
float GVoxelGrid::getVoxelZ() const {
return voxel_z_;
}
int GVoxelGrid::getMaxBX() const {
return max_b_x_;
}
int GVoxelGrid::getMaxBY() const {
return max_b_y_;
}
int GVoxelGrid::getMaxBZ() const {
return max_b_z_;
}
int GVoxelGrid::getMinBX() const {
return min_b_x_;
}
int GVoxelGrid::getMinBY() const {
return min_b_y_;
}
int GVoxelGrid::getMinBZ() const {
return min_b_z_;
}
int GVoxelGrid::getVgridX() const {
return vgrid_x_;
}
int GVoxelGrid::getVgridY() const {
return vgrid_y_;
}
int GVoxelGrid::getVgridZ() const {
return vgrid_z_;
}
void GVoxelGrid::setLeafSize(float voxel_x, float voxel_y, float voxel_z) {
voxel_x_ = voxel_x;
voxel_y_ = voxel_y;
voxel_z_ = voxel_z;
}
double *GVoxelGrid::getCentroidList() const {
return centroid_;
}
double *GVoxelGrid::getCovarianceList() const {
return covariance_;
}
double *GVoxelGrid::getInverseCovarianceList() const {
return inverse_covariance_;
}
int *GVoxelGrid::getPointsPerVoxelList() const {
return points_per_voxel_;
}
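/* Compute the linear index of the voxel containing point (x, y, z):
 * id = id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y,
 * where each per-axis id is taken relative to the minimum grid bound. */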
extern "C" __device__ int voxelId(float x, float y, float z,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z,
int vgrid_x, int vgrid_y, int vgrid_z) {
int id_x = static_cast<int>(floorf(x / voxel_x) - static_cast<float>(min_b_x));
int id_y = static_cast<int>(floorf(y / voxel_y) - static_cast<float>(min_b_y));
int id_z = static_cast<int>(floorf(z / voxel_z) - static_cast<float>(min_b_z));
return (id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y);
}
/* First step to compute centroids and covariances of voxels. */
extern "C" __global__ void
initCentroidAndCovariance(float *x, float *y, float *z, int *starting_point_ids, int *point_ids,
double *centroids, double *covariances, int voxel_num) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < voxel_num; i += stride) {
MatrixDevice centr(3, 1, voxel_num, centroids + i);
MatrixDevice cov(3, 3, voxel_num, covariances + i);
double centr0, centr1, centr2;
double cov00, cov01, cov02, cov11, cov12, cov22;
centr0 = centr1 = centr2 = 0.0;
cov00 = cov11 = cov22 = 1.0;
cov01 = cov02 = cov12 = 0.0;
for (int j = starting_point_ids[i]; j < starting_point_ids[i + 1]; j++) {
int pid = point_ids[j];
double t_x = static_cast<double>(x[pid]);
double t_y = static_cast<double>(y[pid]);
double t_z = static_cast<double>(z[pid]);
centr0 += t_x;
centr1 += t_y;
centr2 += t_z;
cov00 += t_x * t_x;
cov01 += t_x * t_y;
cov02 += t_x * t_z;
cov11 += t_y * t_y;
cov12 += t_y * t_z;
cov22 += t_z * t_z;
}
centr(0) = centr0;
centr(1) = centr1;
centr(2) = centr2;
cov(0, 0) = cov00;
cov(0, 1) = cov01;
cov(0, 2) = cov02;
cov(1, 1) = cov11;
cov(1, 2) = cov12;
cov(2, 2) = cov22;
}
}
/* Update centroids of voxels. */
extern "C" __global__ void updateVoxelCentroid(double *centroid, int *points_per_voxel, int voxel_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
MatrixDevice centr(3, 1, voxel_num, centroid + vid);
double points_num = static_cast<double>(points_per_voxel[vid]);
if (points_num > 0) {
centr /= points_num;
}
}
}
/* Update covariance of voxels. */
extern "C" __global__ void
updateVoxelCovariance(double *centroid, double *pt_sum, double *covariance, int *points_per_voxel, int voxel_num,
int min_points_per_voxel) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
MatrixDevice centr(3, 1, voxel_num, centroid + vid);
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice pts(3, 1, voxel_num, pt_sum + vid);
double points_num = static_cast<double>(points_per_voxel[vid]);
double c0 = centr(0);
double c1 = centr(1);
double c2 = centr(2);
double p0 = pts(0);
double p1 = pts(1);
double p2 = pts(2);
points_per_voxel[vid] = (points_num < min_points_per_voxel) ? 0 : points_num;
if (points_num >= min_points_per_voxel) {
double mult = (points_num - 1.0) / points_num;
cov(0, 0) = ((cov(0, 0) - 2.0 * p0 * c0) / points_num + c0 * c0) * mult;
cov(0, 1) = ((cov(0, 1) - 2.0 * p0 * c1) / points_num + c0 * c1) * mult;
cov(0, 2) = ((cov(0, 2) - 2.0 * p0 * c2) / points_num + c0 * c2) * mult;
cov(1, 0) = cov(0, 1);
cov(1, 1) = ((cov(1, 1) - 2.0 * p1 * c1) / points_num + c1 * c1) * mult;
cov(1, 2) = ((cov(1, 2) - 2.0 * p1 * c2) / points_num + c1 * c2) * mult;
cov(2, 0) = cov(0, 2);
cov(2, 1) = cov(1, 2);
cov(2, 2) = ((cov(2, 2) - 2.0 * p2 * c2) / points_num + c2 * c2) * mult;
}
}
}
extern "C" __global__ void
computeInverseEigenvectors(double *inverse_covariance, int *points_per_voxel, int voxel_num, double *eigenvectors,
int min_points_per_voxel) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
eigen_vectors.inverse(icov);
}
__syncthreads();
}
}
//eigen_vecs = eigen_vecs * eigen_val
extern "C" __global__ void
updateCovarianceS0(int *points_per_voxel, int voxel_num, double *eigenvalues, double *eigenvectors,
int min_points_per_voxel) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
double eig_val0 = eigenvalues[vid];
double eig_val1 = eigenvalues[vid + voxel_num];
double eig_val2 = eigenvalues[vid + 2 * voxel_num];
eigen_vectors(0, 0) *= eig_val0;
eigen_vectors(1, 0) *= eig_val0;
eigen_vectors(2, 0) *= eig_val0;
eigen_vectors(0, 1) *= eig_val1;
eigen_vectors(1, 1) *= eig_val1;
eigen_vectors(2, 1) *= eig_val1;
eigen_vectors(0, 2) *= eig_val2;
eigen_vectors(1, 2) *= eig_val2;
eigen_vectors(2, 2) *= eig_val2;
}
__syncthreads();
}
}
//cov = new eigen_vecs * eigen_vecs transpose
extern "C" __global__ void
updateCovarianceS1(double *covariance, double *inverse_covariance, int *points_per_voxel, int voxel_num,
double *eigenvectors, int min_points_per_voxel, int col) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
double tmp0 = icov(0, col);
double tmp1 = icov(1, col);
double tmp2 = icov(2, col);
cov(0, col) = eigen_vectors(0, 0) * tmp0 + eigen_vectors(0, 1) * tmp1 + eigen_vectors(0, 2) * tmp2;
cov(1, col) = eigen_vectors(1, 0) * tmp0 + eigen_vectors(1, 1) * tmp1 + eigen_vectors(1, 2) * tmp2;
cov(2, col) = eigen_vectors(2, 0) * tmp0 + eigen_vectors(2, 1) * tmp1 + eigen_vectors(2, 2) * tmp2;
}
__syncthreads();
}
}
extern "C" __global__ void
computeInverseCovariance(double *covariance, double *inverse_covariance, int *points_per_voxel, int voxel_num,
int min_points_per_voxel) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
cov.inverse(icov);
}
__syncthreads();
}
}
template<typename T>
__global__ void init(T *input, int size, int local_size) {
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
for (int j = 0; j < local_size; j++)
input[i + j * size] = 1;
}
}
extern "C" __global__ void initBoolean(bool *input, int size) {
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
input[i] = (i % 2 == 0) ? true : false;
}
}
/* Normalize input matrices to avoid overflow. */
extern "C" __global__ void
normalize(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.normalizeInput(id);
__syncthreads();
}
}
/* Compute eigenvalues. Eigenvalues are arranged in increasing order.
* (eigen(0) <= eigen(1) <= eigen(2). */
extern "C" __global__ void
computeEigenvalues(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvalues(id);
__syncthreads();
}
}
/* First step to compute eigenvector 0 of covariance matrices. */
extern "C" __global__ void
computeEvec00(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector00(id);
__syncthreads();
}
}
/* Second step to compute eigenvector 0 of covariance matrices. */
extern "C" __global__ void
computeEvec01(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector01(id);
__syncthreads();
}
}
/* First step to compute eigenvector 1 of covariance matrices. */
extern "C" __global__ void
computeEvec10(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector10(id);
__syncthreads();
}
}
/* Second step to compute eigenvector 1 of covariance matrices. */
extern "C" __global__ void
computeEvec11(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector11(id);
__syncthreads();
}
}
/* Compute eigenvector 2 of covariance matrices. */
extern "C" __global__ void
computeEvec2(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector2(id);
__syncthreads();
}
}
/* Final step to compute eigenvalues. */
extern "C" __global__ void
updateEval(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.updateEigenvalues(id);
__syncthreads();
}
}
/* Update eigenvalues in the case covariance matrix is nearly singular. */
extern "C" __global__ void
updateEval2(double *eigenvalues, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel) {
MatrixDevice eigen_val(3, 1, voxel_num, eigenvalues + id);
double ev0 = eigen_val(0);
double ev1 = eigen_val(1);
double ev2 = eigen_val(2);
if (ev0 < 0 || ev1 < 0 || ev2 <= 0) {
points_per_voxel[id] = 0;
continue;
}
double min_cov_eigvalue = ev2 * 0.01;
if (ev0 < min_cov_eigvalue) {
ev0 = min_cov_eigvalue;
if (ev1 < min_cov_eigvalue) {
ev1 = min_cov_eigvalue;
}
}
eigen_val(0) = ev0;
eigen_val(1) = ev1;
eigen_val(2) = ev2;
__syncthreads();
}
}
}
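/* Host-side pipeline: accumulate per-voxel sums, convert them into centroids and
 * covariances, then run the 3x3 symmetric eigensolver so that near-singular
 * covariances get their eigenvalues clamped before the matrices are inverted. */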
void GVoxelGrid::computeCentroidAndCovariance() {
int block_x = (voxel_num_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : voxel_num_;
int grid_x = (voxel_num_ - 1) / block_x + 1;
hipLaunchKernelGGL(( initCentroidAndCovariance), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, starting_point_ids_, point_ids_, centroid_,
covariance_, voxel_num_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
double *pt_sum;
checkCudaErrors(hipMalloc(&pt_sum, sizeof(double) * voxel_num_ * 3));
checkCudaErrors(hipMemcpy(pt_sum, centroid_, sizeof(double) * voxel_num_ * 3, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( updateVoxelCentroid), dim3(grid_x), dim3(block_x), 0, 0, centroid_, points_per_voxel_, voxel_num_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( updateVoxelCovariance), dim3(grid_x), dim3(block_x), 0, 0, centroid_, pt_sum, covariance_, points_per_voxel_, voxel_num_,
min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(pt_sum));
double *eigenvalues_dev, *eigenvectors_dev;
checkCudaErrors(hipMalloc(&eigenvalues_dev, sizeof(double) * 3 * voxel_num_));
checkCudaErrors(hipMalloc(&eigenvectors_dev, sizeof(double) * 9 * voxel_num_));
// Solve the eigenvalue/eigenvector problem for every voxel covariance on the GPU.
SymmetricEigensolver3x3 sv(voxel_num_);
sv.setInputMatrices(covariance_);
sv.setEigenvalues(eigenvalues_dev);
sv.setEigenvectors(eigenvectors_dev);
hipLaunchKernelGGL(( normalize), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEigenvalues), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec00), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec01), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec10), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec11), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeEvec2), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( updateEval), dim3(grid_x), dim3(block_x), 0, 0, sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( updateEval2), dim3(grid_x), dim3(block_x), 0, 0, eigenvalues_dev, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( computeInverseEigenvectors), dim3(grid_x), dim3(block_x), 0, 0, inverse_covariance_, points_per_voxel_, voxel_num_,
eigenvectors_dev, min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( updateCovarianceS0), dim3(grid_x), dim3(block_x), 0, 0, points_per_voxel_, voxel_num_, eigenvalues_dev, eigenvectors_dev,
min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
for (int i = 0; i < 3; i++) {
hipLaunchKernelGGL(( updateCovarianceS1), dim3(grid_x), dim3(block_x), 0, 0, covariance_, inverse_covariance_, points_per_voxel_, voxel_num_,
eigenvectors_dev, min_points_per_voxel_, i);
checkCudaErrors(hipGetLastError());
}
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( computeInverseCovariance), dim3(grid_x), dim3(block_x), 0, 0, covariance_, inverse_covariance_, points_per_voxel_, voxel_num_,
min_points_per_voxel_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
sv.memFree();
checkCudaErrors(hipFree(eigenvalues_dev));
checkCudaErrors(hipFree(eigenvectors_dev));
}
//Inputs are expected to already reside in device memory
void GVoxelGrid::setInput(float *x, float *y, float *z, int points_num) {
if (points_num <= 0)
return;
x_ = x;
y_ = y;
z_ = z;
points_num_ = points_num;
//std::cerr<<("GVoxelGrid 1")<<std::endl;
findBoundaries();
//std::cerr<<("GVoxelGrid 2")<<std::endl;
voxel_num_ = vgrid_x_ * vgrid_y_ * vgrid_z_;
//std::cerr<<("GVoxelGrid 3")<<std::endl;
initialize();
//std::cerr<<("GVoxelGrid 4")<<std::endl;
scatterPointsToVoxelGrid();
//std::cerr<<("GVoxelGrid 5")<<std::endl;
computeCentroidAndCovariance();
//std::cerr<<("GVoxelGrid 6")<<std::endl;
buildOctree();
//std::cerr<<("GVoxelGrid 7")<<std::endl;
}
/* Find the largest coordinate values */
extern "C" __global__ void findMax(float *x, float *y, float *z, int full_size, int half_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
x[i] = (i + half_size < full_size) ? ((x[i] >= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i];
y[i] = (i + half_size < full_size) ? ((y[i] >= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i];
z[i] = (i + half_size < full_size) ? ((z[i] >= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i];
}
}
/* Find the smallest coordinate values */
extern "C" __global__ void findMin(float *x, float *y, float *z, int full_size, int half_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
x[i] = (i + half_size < full_size) ? ((x[i] <= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i];
y[i] = (i + half_size < full_size) ? ((y[i] <= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i];
z[i] = (i + half_size < full_size) ? ((z[i] <= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i];
}
}
void GVoxelGrid::findBoundaries() {
float *max_x, *max_y, *max_z, *min_x, *min_y, *min_z;
checkCudaErrors(hipMalloc(&max_x, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&max_y, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&max_z, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&min_x, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&min_y, sizeof(float) * points_num_));
checkCudaErrors(hipMalloc(&min_z, sizeof(float) * points_num_));
checkCudaErrors(hipMemcpy(max_x, x_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(max_y, y_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(max_z, z_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(min_x, x_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(min_y, y_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMemcpy(min_z, z_, sizeof(float) * points_num_, hipMemcpyDeviceToDevice));
int points_num = points_num_;
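// Pairwise tree reduction: each pass folds the upper half of the arrays into the
// lower half, so after ceil(log2(N)) passes element 0 holds the extremum.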
while (points_num > 1) {
int half_points_num = (points_num - 1) / 2 + 1;
int block_x = (half_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_points_num;
int grid_x = (half_points_num - 1) / block_x + 1;
hipLaunchKernelGGL(( findMax), dim3(grid_x), dim3(block_x), 0, 0, max_x, max_y, max_z, points_num, half_points_num);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( findMin), dim3(grid_x), dim3(block_x), 0, 0, min_x, min_y, min_z, points_num, half_points_num);
checkCudaErrors(hipGetLastError());
points_num = half_points_num;
}
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&max_x_, max_x, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&max_y_, max_y, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&max_z_, max_z, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&min_x_, min_x, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&min_y_, min_y, sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&min_z_, min_z, sizeof(float), hipMemcpyDeviceToHost));
max_b_x_ = static_cast<int> (floor(max_x_ / voxel_x_));
max_b_y_ = static_cast<int> (floor(max_y_ / voxel_y_));
max_b_z_ = static_cast<int> (floor(max_z_ / voxel_z_));
min_b_x_ = static_cast<int> (floor(min_x_ / voxel_x_));
min_b_y_ = static_cast<int> (floor(min_y_ / voxel_y_));
min_b_z_ = static_cast<int> (floor(min_z_ / voxel_z_));
vgrid_x_ = max_b_x_ - min_b_x_ + 1;
vgrid_y_ = max_b_y_ - min_b_y_ + 1;
vgrid_z_ = max_b_z_ - min_b_z_ + 1;
checkCudaErrors(hipFree(max_x));
checkCudaErrors(hipFree(max_y));
checkCudaErrors(hipFree(max_z));
checkCudaErrors(hipFree(min_x));
checkCudaErrors(hipFree(min_y));
checkCudaErrors(hipFree(min_z));
}
/* Find indexes idx, idy and idz of candidate voxels */
extern "C" __global__ void findBoundariesOfCandidateVoxels(float *x, float *y, float *z,
float radius, int points_num,
float voxel_x, float voxel_y, float voxel_z,
int max_b_x, int max_b_y, int max_b_z,
int min_b_x, int min_b_y, int min_b_z,
int *max_vid_x, int *max_vid_y, int *max_vid_z,
int *min_vid_x, int *min_vid_y, int *min_vid_z,
int *candidate_voxel_per_point) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
float t_x = x[i];
float t_y = y[i];
float t_z = z[i];
int max_id_x = static_cast<int>(floorf((t_x + radius) / voxel_x));
int max_id_y = static_cast<int>(floorf((t_y + radius) / voxel_y));
int max_id_z = static_cast<int>(floorf((t_z + radius) / voxel_z));
int min_id_x = static_cast<int>(floorf((t_x - radius) / voxel_x));
int min_id_y = static_cast<int>(floorf((t_y - radius) / voxel_y));
int min_id_z = static_cast<int>(floorf((t_z - radius) / voxel_z));
/* Find intersection of the cube containing
* the NN sphere of the point and the voxel grid
*/
max_id_x = (max_id_x > max_b_x) ? max_b_x - min_b_x : max_id_x - min_b_x;
max_id_y = (max_id_y > max_b_y) ? max_b_y - min_b_y : max_id_y - min_b_y;
max_id_z = (max_id_z > max_b_z) ? max_b_z - min_b_z : max_id_z - min_b_z;
min_id_x = (min_id_x < min_b_x) ? 0 : min_id_x - min_b_x;
min_id_y = (min_id_y < min_b_y) ? 0 : min_id_y - min_b_y;
min_id_z = (min_id_z < min_b_z) ? 0 : min_id_z - min_b_z;
int vx = max_id_x - min_id_x + 1;
int vy = max_id_y - min_id_y + 1;
int vz = max_id_z - min_id_z + 1;
candidate_voxel_per_point[i] = (vx > 0 && vy > 0 && vz > 0) ? vx * vy * vz : 0;
max_vid_x[i] = max_id_x;
max_vid_y[i] = max_id_y;
max_vid_z[i] = max_id_z;
min_vid_x[i] = min_id_x;
min_vid_y[i] = min_id_y;
min_vid_z[i] = min_id_z;
}
}
/* Write id of valid points to the output buffer */
extern "C" __global__ void
collectValidPoints(int *valid_points_mark, int *valid_points_id, int *valid_points_location, int points_num) {
for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < points_num; index += blockDim.x * gridDim.x) {
if (valid_points_mark[index] != 0) {
valid_points_id[valid_points_location[index]] = index;
}
}
}
/* Compute the global index of candidate voxels.
* global index = idx + idy * grid size x + idz * grid_size x * grid size y */
extern "C" __global__ void updateCandidateVoxelIds(int points_num,
int vgrid_x, int vgrid_y, int vgrid_z,
int *max_vid_x, int *max_vid_y, int *max_vid_z,
int *min_vid_x, int *min_vid_y, int *min_vid_z,
int *starting_voxel_id,
int *candidate_voxel_id) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
int max_id_x = max_vid_x[i];
int max_id_y = max_vid_y[i];
int max_id_z = max_vid_z[i];
int min_id_x = min_vid_x[i];
int min_id_y = min_vid_y[i];
int min_id_z = min_vid_z[i];
int write_location = starting_voxel_id[i];
for (int j = min_id_x; j <= max_id_x; j++) {
for (int k = min_id_y; k <= max_id_y; k++) {
for (int l = min_id_z; l <= max_id_z; l++) {
candidate_voxel_id[write_location] = j + k * vgrid_x + l * vgrid_x * vgrid_y;
write_location++;
}
}
}
}
}
/* Find out which voxels are really inside the radius.
* This is done by comparing the distance between the centroid
* of the voxel and the query point with the radius.
*
* The valid_voxel_mark store the result of the inspection, which is 0
* if the centroid is outside the radius and 1 otherwise.
*
* The valid_points_mark store the status of the inspection per point.
* It is 0 if there is no voxels in the candidate list is truly a neighbor
* of the point, and 1 otherwise.
*
* The valid_voxel_count store the number of true neighbor voxels.
*/
extern "C" __global__ void inspectCandidateVoxels(float *x, float *y, float *z,
float radius, int max_nn, int points_num,
double *centroid, int *points_per_voxel, int offset,
int *starting_voxel_id, int *candidate_voxel_id,
int *valid_voxel_mark, int *valid_voxel_count,
int *valid_points_mark) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
double t_x = static_cast<double>(x[i]);
double t_y = static_cast<double>(y[i]);
double t_z = static_cast<double>(z[i]);
int nn = 0;
for (int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1] && nn <= max_nn; j++) {
int point_num = points_per_voxel[candidate_voxel_id[j]];
MatrixDevice centr(3, 1, offset, centroid + candidate_voxel_id[j]);
double centroid_x = (point_num > 0) ? (t_x - centr(0)) : radius + 1;
double centroid_y = (point_num > 0) ? (t_y - centr(1)) : 0;
double centroid_z = (point_num > 0) ? (t_z - centr(2)) : 0;
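// Empty voxels receive a sentinel offset larger than radius, so they can never pass the distance test below.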
bool res = (norm3d(centroid_x, centroid_y, centroid_z) <= radius);
valid_voxel_mark[j] = (res) ? 1 : 0;
nn += (res) ? 1 : 0;
}
valid_voxel_count[i] = nn;
valid_points_mark[i] = (nn > 0) ? 1 : 0;
__syncthreads();
}
}
/* Write the id of valid voxels to the output buffer */
extern "C" __global__ void
collectValidVoxels(int *valid_voxels_mark, int *candidate_voxel_id, int *output, int *writing_location,
int candidate_voxel_num) {
for (int index = threadIdx.x + blockIdx.x * blockDim.x;
index < candidate_voxel_num; index += blockDim.x * gridDim.x) {
if (valid_voxels_mark[index] == 1) {
output[writing_location[index]] = candidate_voxel_id[index];
}
}
}
/* Write the number of valid voxel per point to the output buffer */
extern "C" __global__ void
collectValidVoxelCount(int *input_valid_voxel_count, int *output_valid_voxel_count, int *writing_location,
int points_num) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < points_num; id += blockDim.x * gridDim.x) {
if (input_valid_voxel_count[id] != 0)
output_valid_voxel_count[writing_location[id]] = input_valid_voxel_count[id];
}
}
template<typename T>
void GVoxelGrid::ExclusiveScan(T *input, int ele_num, T *sum) {
thrust::device_ptr <T> dev_ptr(input);
thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr);
checkCudaErrors(hipDeviceSynchronize());
*sum = *(dev_ptr + ele_num - 1);
}
template<typename T>
void GVoxelGrid::ExclusiveScan(T *input, int ele_num) {
thrust::device_ptr <T> dev_ptr(input);
thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr);
checkCudaErrors(hipDeviceSynchronize());
}
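/* Usage sketch (mirrors the calls below): given counts[0..n-1] stored in a device
 * buffer of n + 1 ints, ExclusiveScan(buf, n + 1, &total) rewrites the buffer as
 * starting offsets and returns the grand total, since the scan writes
 * sum(input[0..i-1]) into element i. */
/* radiusSearch: for each query point, intersect the cube of side 2 * radius with
 * the voxel grid, enumerate the candidate voxels inside it, and keep only those
 * whose centroid truly lies within the radius. */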
void GVoxelGrid::radiusSearch(float *qx, float *qy, float *qz, int points_num, float radius, int max_nn,
int **valid_points, int **starting_voxel_id, int **valid_voxel_id,
int *valid_voxel_num, int *valid_points_num) {
//Testing input query points
int block_x = (points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num;
int grid_x = (points_num - 1) / block_x + 1;
//Boundaries of candidate voxels per points
int *max_vid_x, *max_vid_y, *max_vid_z;
int *min_vid_x, *min_vid_y, *min_vid_z;
checkCudaErrors(hipMalloc(&max_vid_x, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&max_vid_y, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&max_vid_z, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&min_vid_x, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&min_vid_y, sizeof(int) * points_num));
checkCudaErrors(hipMalloc(&min_vid_z, sizeof(int) * points_num));
//Determine the number of candidate voxel per points
int *candidate_voxel_num_per_point;
int total_candidate_voxel_num;
checkCudaErrors(hipMalloc(&candidate_voxel_num_per_point, sizeof(int) * (points_num + 1)));
hipLaunchKernelGGL(( findBoundariesOfCandidateVoxels), dim3(grid_x), dim3(block_x), 0, 0, qx, qy, qz, radius, points_num,
voxel_x_, voxel_y_, voxel_z_,
max_b_x_, max_b_y_, max_b_z_,
min_b_x_, min_b_y_, min_b_z_,
max_vid_x, max_vid_y, max_vid_z,
min_vid_x, min_vid_y, min_vid_z,
candidate_voxel_num_per_point);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//Total candidate voxel num is determined by an exclusive scan on candidate_voxel_num_per_point
ExclusiveScan(candidate_voxel_num_per_point, points_num + 1, &total_candidate_voxel_num);
if (total_candidate_voxel_num <= 0) {
std::cout << "No candidate voxel was found. Exiting..." << std::endl;
checkCudaErrors(hipFree(max_vid_x));
checkCudaErrors(hipFree(max_vid_y));
checkCudaErrors(hipFree(max_vid_z));
checkCudaErrors(hipFree(min_vid_x));
checkCudaErrors(hipFree(min_vid_y));
checkCudaErrors(hipFree(min_vid_z));
checkCudaErrors(hipFree(candidate_voxel_num_per_point));
*valid_points = NULL;
*starting_voxel_id = NULL;
*valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
return;
}
//Determine the voxel id of candidate voxels
int *candidate_voxel_id;
checkCudaErrors(hipMalloc(&candidate_voxel_id, sizeof(int) * total_candidate_voxel_num));
hipLaunchKernelGGL(( updateCandidateVoxelIds), dim3(grid_x), dim3(block_x), 0, 0, points_num, vgrid_x_, vgrid_y_, vgrid_z_,
max_vid_x, max_vid_y, max_vid_z,
min_vid_x, min_vid_y, min_vid_z,
candidate_voxel_num_per_point, candidate_voxel_id);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//Go through the candidate voxel id list and find out which voxels are really inside the radius
int *valid_voxel_mark;
checkCudaErrors(hipMalloc(&valid_voxel_mark, sizeof(int) * total_candidate_voxel_num));
int *valid_voxel_count;
checkCudaErrors(hipMalloc(&valid_voxel_count, sizeof(int) * (points_num + 1)));
int *valid_points_mark;
checkCudaErrors(hipMalloc(&valid_points_mark, sizeof(int) * points_num));
block_x = (total_candidate_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : total_candidate_voxel_num;
grid_x = (total_candidate_voxel_num - 1) / block_x + 1;
    //TODO: Check the valid voxel count again
hipLaunchKernelGGL(( inspectCandidateVoxels), dim3(grid_x), dim3(block_x), 0, 0, qx, qy, qz, radius, max_nn, points_num,
centroid_, points_per_voxel_, voxel_num_,
candidate_voxel_num_per_point, candidate_voxel_id,
valid_voxel_mark, valid_voxel_count, valid_points_mark);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
//Collect valid points
int *valid_points_location;
checkCudaErrors(hipMalloc(&valid_points_location, sizeof(int) * (points_num + 1)));
checkCudaErrors(hipMemset(valid_points_location, 0, sizeof(int) * (points_num + 1)));
checkCudaErrors(hipMemcpy(valid_points_location, valid_points_mark, sizeof(int) * points_num,
hipMemcpyDeviceToDevice));
    //Writing locations in the output buffer are determined by an exclusive scan
ExclusiveScan(valid_points_location, points_num + 1, valid_points_num);
if (*valid_points_num <= 0) {
//std::cout << "No valid point was found. Exiting..." << std::endl;
std::cout << "No valid point was found. Exiting...: " << *valid_points_num << std::endl;
checkCudaErrors(hipFree(max_vid_x));
checkCudaErrors(hipFree(max_vid_y));
checkCudaErrors(hipFree(max_vid_z));
checkCudaErrors(hipFree(min_vid_x));
checkCudaErrors(hipFree(min_vid_y));
checkCudaErrors(hipFree(min_vid_z));
checkCudaErrors(hipFree(candidate_voxel_num_per_point));
checkCudaErrors(hipFree(candidate_voxel_id));
checkCudaErrors(hipFree(valid_voxel_mark));
checkCudaErrors(hipFree(valid_voxel_count));
checkCudaErrors(hipFree(valid_points_mark));
checkCudaErrors(hipFree(valid_points_location));
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
return;
}
checkCudaErrors(hipMalloc(valid_points, sizeof(int) * (*valid_points_num)));
hipLaunchKernelGGL(( collectValidPoints), dim3(grid_x), dim3(block_x), 0, 0, valid_points_mark, *valid_points, valid_points_location, points_num);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMalloc(starting_voxel_id, sizeof(int) * (*valid_points_num + 1)));
hipLaunchKernelGGL(( collectValidVoxelCount), dim3(grid_x), dim3(block_x), 0, 0, valid_voxel_count, *starting_voxel_id, valid_points_location,
points_num);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
    //Determine the starting location of voxels per point in the valid points list
ExclusiveScan(*starting_voxel_id, *valid_points_num + 1, valid_voxel_num);
//Collect valid voxels
int *valid_voxel_location;
checkCudaErrors(hipMalloc(&valid_voxel_location, sizeof(int) * (total_candidate_voxel_num + 1)));
checkCudaErrors(hipMemcpy(valid_voxel_location, valid_voxel_mark, sizeof(int) * total_candidate_voxel_num,
hipMemcpyDeviceToDevice));
ExclusiveScan(valid_voxel_location, total_candidate_voxel_num + 1, valid_voxel_num);
if (*valid_voxel_num <= 0) {
checkCudaErrors(hipFree(max_vid_x));
max_vid_x = NULL;
checkCudaErrors(hipFree(max_vid_y));
max_vid_y = NULL;
checkCudaErrors(hipFree(max_vid_z));
max_vid_z = NULL;
checkCudaErrors(hipFree(min_vid_x));
min_vid_x = NULL;
checkCudaErrors(hipFree(min_vid_y));
min_vid_y = NULL;
checkCudaErrors(hipFree(min_vid_z));
min_vid_z = NULL;
checkCudaErrors(hipFree(candidate_voxel_num_per_point));
candidate_voxel_num_per_point = NULL;
checkCudaErrors(hipFree(candidate_voxel_id));
candidate_voxel_id = NULL;
checkCudaErrors(hipFree(valid_voxel_mark));
valid_voxel_mark = NULL;
checkCudaErrors(hipFree(valid_voxel_count));
valid_voxel_count = NULL;
checkCudaErrors(hipFree(valid_points_mark));
valid_points_mark = NULL;
checkCudaErrors(hipFree(valid_points_location));
valid_points_location = NULL;
checkCudaErrors(hipFree(valid_voxel_location));
valid_voxel_location = NULL;
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
        *valid_points_num = 0;
        return;
    }
checkCudaErrors(hipMalloc(valid_voxel_id, sizeof(int) * (*valid_voxel_num)));
block_x = (total_candidate_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : total_candidate_voxel_num;
grid_x = (total_candidate_voxel_num - 1) / block_x + 1;
hipLaunchKernelGGL(( collectValidVoxels), dim3(grid_x), dim3(block_x), 0, 0, valid_voxel_mark, candidate_voxel_id, *valid_voxel_id,
valid_voxel_location, total_candidate_voxel_num);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(max_vid_x));
checkCudaErrors(hipFree(max_vid_y));
checkCudaErrors(hipFree(max_vid_z));
checkCudaErrors(hipFree(min_vid_x));
checkCudaErrors(hipFree(min_vid_y));
checkCudaErrors(hipFree(min_vid_z));
checkCudaErrors(hipFree(candidate_voxel_num_per_point));
checkCudaErrors(hipFree(candidate_voxel_id));
checkCudaErrors(hipFree(valid_voxel_mark));
checkCudaErrors(hipFree(valid_points_mark));
checkCudaErrors(hipFree(valid_voxel_count));
checkCudaErrors(hipFree(valid_points_location));
checkCudaErrors(hipFree(valid_voxel_location));
}
/* Build parent nodes from child nodes of the octree */
extern "C" __global__ void buildParent(double *child_centroids, int *points_per_child,
int child_grid_x, int child_grid_y, int child_grid_z, int child_num,
double *parent_centroids, int *points_per_parent,
int parent_grid_x, int parent_grid_y, int parent_grid_z) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int idz = threadIdx.z + blockIdx.z * blockDim.z;
if (idx < parent_grid_x && idy < parent_grid_y && idz < parent_grid_z) {
int parent_idx = idx + idy * parent_grid_x + idz * parent_grid_x * parent_grid_y;
MatrixDevice parent_centr(3, 1, parent_grid_x * parent_grid_y * parent_grid_z,
parent_centroids + parent_idx);
double pc0, pc1, pc2;
int points_num = 0;
double dpoints_num;
pc0 = 0.0;
pc1 = 0.0;
pc2 = 0.0;
for (int i = idx * 2; i < idx * 2 + 2 && i < child_grid_x; i++) {
for (int j = idy * 2; j < idy * 2 + 2 && j < child_grid_y; j++) {
for (int k = idz * 2; k < idz * 2 + 2 && k < child_grid_z; k++) {
int child_idx = i + j * child_grid_x + k * child_grid_x * child_grid_y;
MatrixDevice child_centr(3, 1, child_num, child_centroids + child_idx);
int child_points = points_per_child[child_idx];
double dchild_points = static_cast<double>(child_points);
pc0 += (child_points > 0) ? dchild_points * child_centr(0) : 0.0;
pc1 += (child_points > 0) ? dchild_points * child_centr(1) : 0.0;
pc2 += (child_points > 0) ? dchild_points * child_centr(2) : 0.0;
points_num += (child_points > 0) ? child_points : 0;
__syncthreads();
}
}
}
dpoints_num = static_cast<double>(points_num);
parent_centr(0) = (points_num <= 0) ? DBL_MAX : pc0 / dpoints_num;
parent_centr(1) = (points_num <= 0) ? DBL_MAX : pc1 / dpoints_num;
parent_centr(2) = (points_num <= 0) ? DBL_MAX : pc2 / dpoints_num;
points_per_parent[parent_idx] = points_num;
}
}
/* Compute the number of points per voxel using atomicAdd */
extern "C" __global__ void insertPointsToGrid(float *x, float *y, float *z, int points_num,
int *points_per_voxel, int voxel_num,
int vgrid_x, int vgrid_y, int vgrid_z,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < points_num; i += stride) {
float t_x = x[i];
float t_y = y[i];
float t_z = z[i];
int voxel_id = voxelId(t_x, t_y, t_z, voxel_x, voxel_y, voxel_z, min_b_x, min_b_y, min_b_z, vgrid_x,
vgrid_y, vgrid_z);
// Update number of points in the voxel
int ptr_increment = (voxel_id < voxel_num) * voxel_id; // if (voxel_id < voxel_num), then use voxel_id
int incremental_value = (voxel_id < voxel_num);
//atomicAdd(points_per_voxel + voxel_id, 1);
atomicAdd(points_per_voxel + ptr_increment, incremental_value);
}
}
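/* Note on the branchless guard above: a point whose voxel_id is >= voxel_num
 * performs atomicAdd(points_per_voxel + 0, 0), a harmless no-op on slot 0,
 * instead of an out-of-bounds write; for in-range ids it is equivalent to
 * the commented-out atomicAdd(points_per_voxel + voxel_id, 1). */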
/* Rearrange points to locations corresponding to voxels */
extern "C" __global__ void scatterPointsToVoxels(float *x, float *y, float *z, int points_num, int voxel_num,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z,
int vgrid_x, int vgrid_y, int vgrid_z,
int *writing_locations, int *point_ids) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
int voxel_id = voxelId(x[i], y[i], z[i], voxel_x, voxel_y, voxel_z,
min_b_x, min_b_y, min_b_z, vgrid_x, vgrid_y, vgrid_z);
int ptr_increment = (voxel_id < voxel_num) * voxel_id;
int incremental_value = (voxel_id < voxel_num);
//int loc = atomicAdd(writing_locations + voxel_id, 1);
int loc = atomicAdd(writing_locations + ptr_increment, incremental_value);
point_ids[loc] = i;
}
}
void GVoxelGrid::scatterPointsToVoxelGrid() {
if (starting_point_ids_ != NULL) {
checkCudaErrors(hipFree(starting_point_ids_));
starting_point_ids_ = NULL;
}
if (point_ids_ != NULL) {
checkCudaErrors(hipFree(point_ids_));
point_ids_ = NULL;
}
int block_x = (points_num_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num_;
int grid_x = (points_num_ - 1) / block_x + 1;
hipLaunchKernelGGL(( insertPointsToGrid), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_num_, points_per_voxel_, voxel_num_,
vgrid_x_, vgrid_y_, vgrid_z_,
voxel_x_, voxel_y_, voxel_z_,
min_b_x_, min_b_y_, min_b_z_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMalloc(&starting_point_ids_, sizeof(int) * (voxel_num_ + 1)));
int *writing_location;
checkCudaErrors(hipMalloc(&writing_location, sizeof(int) * voxel_num_));
checkCudaErrors(
hipMemcpy(starting_point_ids_, points_per_voxel_, sizeof(int) * voxel_num_, hipMemcpyDeviceToDevice));
ExclusiveScan(starting_point_ids_, voxel_num_ + 1);
checkCudaErrors(
hipMemcpy(writing_location, starting_point_ids_, sizeof(int) * voxel_num_, hipMemcpyDeviceToDevice));
checkCudaErrors(hipMalloc(&point_ids_, sizeof(int) * points_num_));
hipLaunchKernelGGL(( scatterPointsToVoxels), dim3(grid_x), dim3(block_x), 0, 0, x_, y_, z_, points_num_, voxel_num_,
voxel_x_, voxel_y_, voxel_z_,
min_b_x_, min_b_y_, min_b_z_,
vgrid_x_, vgrid_y_, vgrid_z_,
writing_location, point_ids_);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(writing_location));
}
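/* Layout sketch (illustrative): the two kernels above implement a counting
 * sort, e.g. points_per_voxel_ = {2, 0, 3} scans to starting_point_ids_ =
 * {0, 2, 2, 5}, after which point_ids_[0..1] hold the points of voxel 0 and
 * point_ids_[2..4] those of voxel 2; the order within a voxel depends on
 * atomicAdd scheduling and is therefore not deterministic. */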
void GVoxelGrid::buildOctree() {
for (unsigned int i = 1; i < octree_centroids_.size(); i++) {
if (octree_centroids_[i] != NULL) {
checkCudaErrors(hipFree(octree_centroids_[i]));
octree_centroids_[i] = NULL;
}
if (octree_points_per_node_[i] != NULL) {
checkCudaErrors(hipFree(octree_points_per_node_[i]));
octree_points_per_node_[i] = NULL;
}
}
octree_centroids_.clear();
octree_points_per_node_.clear();
octree_grid_size_.clear();
    //Push leaves to the octree list
octree_centroids_.push_back(centroid_);
octree_points_per_node_.push_back(points_per_voxel_);
OctreeGridSize grid_size;
grid_size.size_x = vgrid_x_;
grid_size.size_y = vgrid_y_;
grid_size.size_z = vgrid_z_;
octree_grid_size_.push_back(grid_size);
int node_number = voxel_num_;
int child_grid_x, child_grid_y, child_grid_z;
int parent_grid_x, parent_grid_y, parent_grid_z;
int i = 0;
while (node_number > 8) {
//std::cerr<<"while: "<<node_number<<std::endl;
child_grid_x = octree_grid_size_[i].size_x;
child_grid_y = octree_grid_size_[i].size_y;
child_grid_z = octree_grid_size_[i].size_z;
parent_grid_x = (child_grid_x - 1) / 2 + 1;
parent_grid_y = (child_grid_y - 1) / 2 + 1;
parent_grid_z = (child_grid_z - 1) / 2 + 1;
node_number = parent_grid_x * parent_grid_y * parent_grid_z;
double *parent_centroids;
int *points_per_parent;
//std::cerr<<"1 "<<std::endl;
checkCudaErrors(hipMalloc(&parent_centroids, sizeof(double) * 3 * node_number));
checkCudaErrors(hipMalloc(&points_per_parent, sizeof(int) * node_number));
//std::cerr<<"2 "<<std::endl;
double *child_centroids = octree_centroids_[i];
int *points_per_child = octree_points_per_node_[i];
int block_x = (parent_grid_x > BLOCK_X) ? BLOCK_X : parent_grid_x;
int block_y = (parent_grid_y > BLOCK_Y) ? BLOCK_Y : parent_grid_y;
int block_z = (parent_grid_z > BLOCK_Z) ? BLOCK_Z : parent_grid_z;
//std::cerr<<"3 "<<std::endl;
int grid_x = (parent_grid_x - 1) / block_x + 1;
int grid_y = (parent_grid_y - 1) / block_y + 1;
int grid_z = (parent_grid_z - 1) / block_z + 1;
dim3 block(block_x, block_y, block_z);
dim3 grid(grid_x, grid_y, grid_z);
//std::cerr<<"4 "<<std::endl;
hipLaunchKernelGGL(( buildParent), dim3(grid), dim3(block), 0, 0, child_centroids, points_per_child,
child_grid_x, child_grid_y, child_grid_z,
child_grid_x * child_grid_y * child_grid_z,
parent_centroids, points_per_parent,
parent_grid_x, parent_grid_y, parent_grid_z);
//std::cerr<<"5 "<<std::endl;
checkCudaErrors(hipGetLastError());
octree_centroids_.push_back(parent_centroids);
octree_points_per_node_.push_back(points_per_parent);
grid_size.size_x = parent_grid_x;
grid_size.size_y = parent_grid_y;
grid_size.size_z = parent_grid_z;
octree_grid_size_.push_back(grid_size);
//std::cerr<<"6 "<<std::endl;
i++;
//std::cerr<<"end: "<<node_number<<std::endl;
}
//std::cerr<<"buildOctree 1 "<<std::endl;
checkCudaErrors(hipDeviceSynchronize());
//std::cerr<<"buildOctree 2 "<<std::endl;
}
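/* Level-size sketch (illustrative): each level halves the grid per axis via
 * (n - 1) / 2 + 1, so a 10x10x10 leaf grid yields 5x5x5 -> 3x3x3 -> 2x2x2,
 * where the loop stops because 2 * 2 * 2 = 8 nodes. */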
/* Search for the nearest octree node */
extern "C" __global__ void nearestOctreeNodeSearch(float *x, float *y, float *z,
int *vid_x, int *vid_y, int *vid_z,
int points_num,
double *centroids, int *points_per_node,
int vgrid_x, int vgrid_y, int vgrid_z, int node_num) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
int vx = vid_x[i];
int vy = vid_y[i];
int vz = vid_z[i];
double min_dist = DBL_MAX;
double t_x = static_cast<double>(x[i]);
double t_y = static_cast<double>(y[i]);
double t_z = static_cast<double>(z[i]);
double cur_dist;
int out_x, out_y, out_z;
out_x = vx;
out_y = vy;
out_z = vz;
double tmp_x, tmp_y, tmp_z;
for (int j = vx * 2; j < vx * 2 + 2 && j < vgrid_x; j++) {
for (int k = vy * 2; k < vy * 2 + 2 && k < vgrid_y; k++) {
for (int l = vz * 2; l < vz * 2 + 2 && l < vgrid_z; l++) {
int node_id = j + k * vgrid_x + l * vgrid_x * vgrid_y;
MatrixDevice node_centr(3, 1, node_num, centroids + node_id);
int points = points_per_node[node_id];
tmp_x = (points > 0) ? node_centr(0) - t_x : DBL_MAX;
tmp_y = (points > 0) ? node_centr(1) - t_y : 0.0;
tmp_z = (points > 0) ? node_centr(2) - t_z : 0.0;
cur_dist = norm3d(tmp_x, tmp_y, tmp_z);
bool res = (cur_dist < min_dist);
out_x = (res) ? j : out_x;
out_y = (res) ? k : out_y;
out_z = (res) ? l : out_z;
min_dist = (res) ? cur_dist : min_dist;
}
}
}
vid_x[i] = out_x;
vid_y[i] = out_y;
vid_z[i] = out_z;
}
}
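/* Descent note: vid_x/y/z enter holding each point's node index at the next
 * coarser level (all zeros on the first call) and leave holding the index of
 * the nearest of that node's up-to-2x2x2 children, so successive calls from
 * the top level downward walk a single branch of the octree per point. */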
/* Search for the nearest point in the nearest voxel */
extern "C" __global__ void nearestPointSearch(float *qx, float *qy, float *qz, int qpoints_num,
float *rx, float *ry, float *rz, int rpoints_num,
int *vid_x, int *vid_y, int *vid_z,
int vgrid_x, int vgrid_y, int vgrid_z, int voxel_num,
int *starting_point_id, int *point_id, double *min_distance) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < qpoints_num; i += stride) {
int voxel_id = vid_x[i] + vid_y[i] * vgrid_x + vid_z[i] * vgrid_x * vgrid_y;
float cor_qx = qx[i];
float cor_qy = qy[i];
float cor_qz = qz[i];
float min_dist = FLT_MAX;
for (int j = starting_point_id[voxel_id]; j < starting_point_id[voxel_id + 1]; j++) {
int pid = point_id[j];
float cor_rx = rx[pid];
float cor_ry = ry[pid];
float cor_rz = rz[pid];
cor_rx -= cor_qx;
cor_ry -= cor_qy;
cor_rz -= cor_qz;
min_dist = fminf(norm3df(cor_rx, cor_ry, cor_rz), min_dist);
}
min_distance[i] = static_cast<double>(min_dist);
}
}
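/* Scope note: only the points of the single voxel chosen by the octree
 * descent are scanned, so the reported minimum is approximate whenever the
 * true nearest neighbor lies in an adjacent voxel; for an empty voxel
 * min_dist stays FLT_MAX, which verifyDistances below rejects. */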
/* Verify if min distances are really smaller than or equal to max_range */
extern "C" __global__ void
verifyDistances(int *valid_distance, double *min_distance, double max_range, int points_num) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
bool check = (min_distance[i] <= max_range);
valid_distance[i] = (check) ? 1 : 0;
if (!check) {
min_distance[i] = 0;
}
}
}
void GVoxelGrid::nearestNeighborSearch(float *trans_x, float *trans_y, float *trans_z, int point_num,
int *valid_distance, double *min_distance, float max_range) {
int *vid_x, *vid_y, *vid_z;
checkCudaErrors(hipMalloc(&vid_x, sizeof(int) * point_num));
checkCudaErrors(hipMalloc(&vid_y, sizeof(int) * point_num));
checkCudaErrors(hipMalloc(&vid_z, sizeof(int) * point_num));
checkCudaErrors(hipMemset(vid_x, 0, sizeof(int) * point_num));
checkCudaErrors(hipMemset(vid_y, 0, sizeof(int) * point_num));
checkCudaErrors(hipMemset(vid_z, 0, sizeof(int) * point_num));
checkCudaErrors(hipDeviceSynchronize());
int block_x = (point_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : point_num;
int grid_x = (point_num - 1) / block_x + 1;
    // Go from the top of the octree down to the bottom
for (int i = octree_centroids_.size() - 1; i >= 0; i--) {
double *centroids = octree_centroids_[i];
int *points_per_node = octree_points_per_node_[i];
int vgrid_x = octree_grid_size_[i].size_x;
int vgrid_y = octree_grid_size_[i].size_y;
int vgrid_z = octree_grid_size_[i].size_z;
int node_num = vgrid_x * vgrid_y * vgrid_z;
hipLaunchKernelGGL(( nearestOctreeNodeSearch), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z,
vid_x, vid_y, vid_z,
point_num,
centroids, points_per_node,
vgrid_x, vgrid_y, vgrid_z, node_num);
checkCudaErrors(hipGetLastError());
}
hipLaunchKernelGGL(( nearestPointSearch), dim3(grid_x), dim3(block_x), 0, 0, trans_x, trans_y, trans_z, point_num,
x_, y_, z_, points_num_,
vid_x, vid_y, vid_z,
vgrid_x_, vgrid_y_, vgrid_z_, voxel_num_,
starting_point_ids_, point_ids_,
min_distance);
checkCudaErrors(hipGetLastError());
hipLaunchKernelGGL(( verifyDistances), dim3(grid_x), dim3(block_x), 0, 0, valid_distance, min_distance, max_range, point_num);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipFree(vid_x));
checkCudaErrors(hipFree(vid_y));
checkCudaErrors(hipFree(vid_z));
}
}
// c8a88e1a101d446664a4e329ee06e5cdd9b1b40f.cu
#include "ndt_gpu/VoxelGrid.h"
#include "ndt_gpu/debug.h"
#include "ndt_gpu/common.h"
#include <math.h>
#include <limits>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/fill.h>
#include <inttypes.h>
#include <vector>
#include <cmath>
#include <stdio.h>
#include <sys/time.h>
#include "ndt_gpu/SymmetricEigenSolver.h"
namespace gpu {
GVoxelGrid::GVoxelGrid() :
x_(NULL),
y_(NULL),
z_(NULL),
points_num_(0),
centroid_(NULL),
covariance_(NULL),
inverse_covariance_(NULL),
points_per_voxel_(NULL),
voxel_num_(0),
max_x_(FLT_MAX),
max_y_(FLT_MAX),
max_z_(FLT_MAX),
min_x_(FLT_MIN),
min_y_(FLT_MIN),
min_z_(FLT_MIN),
voxel_x_(0),
voxel_y_(0),
voxel_z_(0),
max_b_x_(0),
max_b_y_(0),
max_b_z_(0),
min_b_x_(0),
min_b_y_(0),
min_b_z_(0),
vgrid_x_(0),
vgrid_y_(0),
vgrid_z_(0),
min_points_per_voxel_(6),
starting_point_ids_(NULL),
point_ids_(NULL),
is_copied_(false) {
};
GVoxelGrid::GVoxelGrid(const GVoxelGrid &other) {
x_ = other.x_;
y_ = other.y_;
z_ = other.z_;
points_num_ = other.points_num_;
centroid_ = other.centroid_;
covariance_ = other.covariance_;
inverse_covariance_ = other.inverse_covariance_;
points_per_voxel_ = other.points_per_voxel_;
voxel_num_ = other.voxel_num_;
max_x_ = other.max_x_;
max_y_ = other.max_y_;
max_z_ = other.max_z_;
min_x_ = other.min_x_;
min_y_ = other.min_y_;
min_z_ = other.min_z_;
voxel_x_ = other.voxel_x_;
voxel_y_ = other.voxel_y_;
voxel_z_ = other.voxel_z_;
max_b_x_ = other.max_b_x_;
max_b_y_ = other.max_b_y_;
max_b_z_ = other.max_b_z_;
min_b_x_ = other.min_b_x_;
min_b_y_ = other.min_b_y_;
min_b_z_ = other.min_b_z_;
vgrid_x_ = other.vgrid_x_;
vgrid_y_ = other.vgrid_y_;
vgrid_z_ = other.vgrid_z_;
min_points_per_voxel_ = other.min_points_per_voxel_;
starting_point_ids_ = other.starting_point_ids_;
point_ids_ = other.point_ids_;
is_copied_ = true;
}
GVoxelGrid::~GVoxelGrid() {
if (!is_copied_) {
for (unsigned int i = 1; i < octree_centroids_.size(); i++) {
if (octree_centroids_[i] != NULL) {
checkCudaErrors(cudaFree(octree_centroids_[i]));
octree_centroids_[i] = NULL;
}
if (octree_points_per_node_[i] != NULL) {
checkCudaErrors(cudaFree(octree_points_per_node_[i]));
octree_points_per_node_[i] = NULL;
}
}
octree_centroids_.clear();
octree_points_per_node_.clear();
octree_grid_size_.clear();
if (starting_point_ids_ != NULL) {
checkCudaErrors(cudaFree(starting_point_ids_));
starting_point_ids_ = NULL;
}
if (point_ids_ != NULL) {
checkCudaErrors(cudaFree(point_ids_));
point_ids_ = NULL;
}
if (centroid_ != NULL) {
checkCudaErrors(cudaFree(centroid_));
centroid_ = NULL;
}
if (covariance_ != NULL) {
checkCudaErrors(cudaFree(covariance_));
covariance_ = NULL;
}
if (inverse_covariance_ != NULL) {
checkCudaErrors(cudaFree(inverse_covariance_));
inverse_covariance_ = NULL;
}
if (points_per_voxel_ != NULL) {
checkCudaErrors(cudaFree(points_per_voxel_));
points_per_voxel_ = NULL;
}
}
}
void GVoxelGrid::initialize() {
if (centroid_ != NULL) {
checkCudaErrors(cudaFree(centroid_));
centroid_ = NULL;
}
if (covariance_ != NULL) {
checkCudaErrors(cudaFree(covariance_));
covariance_ = NULL;
}
if (inverse_covariance_ != NULL) {
checkCudaErrors(cudaFree(inverse_covariance_));
inverse_covariance_ = NULL;
}
if (points_per_voxel_ != NULL) {
checkCudaErrors(cudaFree(points_per_voxel_));
points_per_voxel_ = NULL;
}
checkCudaErrors(cudaMalloc(¢roid_, sizeof(double) * 3 * voxel_num_));
checkCudaErrors(cudaMalloc(&covariance_, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(cudaMalloc(&inverse_covariance_, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(cudaMalloc(&points_per_voxel_, sizeof(int) * voxel_num_));
checkCudaErrors(cudaMemset(inverse_covariance_, 0, sizeof(double) * 9 * voxel_num_));
checkCudaErrors(cudaMemset(points_per_voxel_, 0, sizeof(int) * voxel_num_));
checkCudaErrors(cudaDeviceSynchronize());
}
int GVoxelGrid::getVoxelNum() const {
return voxel_num_;
}
float GVoxelGrid::getMaxX() const {
return max_x_;
}
float GVoxelGrid::getMaxY() const {
return max_y_;
}
float GVoxelGrid::getMaxZ() const {
return max_z_;
}
float GVoxelGrid::getMinX() const {
return min_x_;
}
float GVoxelGrid::getMinY() const {
return min_y_;
}
float GVoxelGrid::getMinZ() const {
return min_z_;
}
float GVoxelGrid::getVoxelX() const {
return voxel_x_;
}
float GVoxelGrid::getVoxelY() const {
return voxel_y_;
}
float GVoxelGrid::getVoxelZ() const {
return voxel_z_;
}
int GVoxelGrid::getMaxBX() const {
return max_b_x_;
}
int GVoxelGrid::getMaxBY() const {
return max_b_y_;
}
int GVoxelGrid::getMaxBZ() const {
return max_b_z_;
}
int GVoxelGrid::getMinBX() const {
return min_b_x_;
}
int GVoxelGrid::getMinBY() const {
return min_b_y_;
}
int GVoxelGrid::getMinBZ() const {
return min_b_z_;
}
int GVoxelGrid::getVgridX() const {
return vgrid_x_;
}
int GVoxelGrid::getVgridY() const {
return vgrid_y_;
}
int GVoxelGrid::getVgridZ() const {
return vgrid_z_;
}
void GVoxelGrid::setLeafSize(float voxel_x, float voxel_y, float voxel_z) {
voxel_x_ = voxel_x;
voxel_y_ = voxel_y;
voxel_z_ = voxel_z;
}
double *GVoxelGrid::getCentroidList() const {
return centroid_;
}
double *GVoxelGrid::getCovarianceList() const {
return covariance_;
}
double *GVoxelGrid::getInverseCovarianceList() const {
return inverse_covariance_;
}
int *GVoxelGrid::getPointsPerVoxelList() const {
return points_per_voxel_;
}
extern "C" __device__ int voxelId(float x, float y, float z,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z,
int vgrid_x, int vgrid_y, int vgrid_z) {
int id_x = static_cast<int>(floorf(x / voxel_x) - static_cast<float>(min_b_x));
int id_y = static_cast<int>(floorf(y / voxel_y) - static_cast<float>(min_b_y));
int id_z = static_cast<int>(floorf(z / voxel_z) - static_cast<float>(min_b_z));
return (id_x + id_y * vgrid_x + id_z * vgrid_x * vgrid_y);
}
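/* Worked example (illustrative values): with voxel size 1.0 on every axis,
 * min_b = (-2, -2, -2) and vgrid_x = vgrid_y = 8, the point (0.5, 1.2, -0.7)
 * maps to (id_x, id_y, id_z) = (2, 3, 1) and to the flat index
 * 2 + 3 * 8 + 1 * 8 * 8 = 90. */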
/* First step to compute centroids and covariances of voxels. */
extern "C" __global__ void
initCentroidAndCovariance(float *x, float *y, float *z, int *starting_point_ids, int *point_ids,
double *centroids, double *covariances, int voxel_num) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < voxel_num; i += stride) {
MatrixDevice centr(3, 1, voxel_num, centroids + i);
MatrixDevice cov(3, 3, voxel_num, covariances + i);
double centr0, centr1, centr2;
double cov00, cov01, cov02, cov11, cov12, cov22;
centr0 = centr1 = centr2 = 0.0;
cov00 = cov11 = cov22 = 1.0;
cov01 = cov02 = cov12 = 0.0;
for (int j = starting_point_ids[i]; j < starting_point_ids[i + 1]; j++) {
int pid = point_ids[j];
double t_x = static_cast<double>(x[pid]);
double t_y = static_cast<double>(y[pid]);
double t_z = static_cast<double>(z[pid]);
centr0 += t_x;
centr1 += t_y;
centr2 += t_z;
cov00 += t_x * t_x;
cov01 += t_x * t_y;
cov02 += t_x * t_z;
cov11 += t_y * t_y;
cov12 += t_y * t_z;
cov22 += t_z * t_z;
}
centr(0) = centr0;
centr(1) = centr1;
centr(2) = centr2;
cov(0, 0) = cov00;
cov(0, 1) = cov01;
cov(0, 2) = cov02;
cov(1, 1) = cov11;
cov(1, 2) = cov12;
cov(2, 2) = cov22;
}
}
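/* One thread per voxel walks its contiguous slice of point_ids (built by
 * scatterPointsToVoxelGrid) and accumulates the raw sums (sum of x, sum of
 * x*y, ...); the actual mean and covariance are finalized afterwards by
 * updateVoxelCentroid and updateVoxelCovariance. Note the diagonal sums
 * start at 1.0, which acts as a small regularizer. */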
/* Update centroids of voxels. */
extern "C" __global__ void updateVoxelCentroid(double *centroid, int *points_per_voxel, int voxel_num) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
MatrixDevice centr(3, 1, voxel_num, centroid + vid);
double points_num = static_cast<double>(points_per_voxel[vid]);
if (points_num > 0) {
centr /= points_num;
}
}
}
/* Update covariance of voxels. */
extern "C" __global__ void
updateVoxelCovariance(double *centroid, double *pt_sum, double *covariance, int *points_per_voxel, int voxel_num,
int min_points_per_voxel) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
MatrixDevice centr(3, 1, voxel_num, centroid + vid);
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice pts(3, 1, voxel_num, pt_sum + vid);
double points_num = static_cast<double>(points_per_voxel[vid]);
double c0 = centr(0);
double c1 = centr(1);
double c2 = centr(2);
double p0 = pts(0);
double p1 = pts(1);
double p2 = pts(2);
points_per_voxel[vid] = (points_num < min_points_per_voxel) ? 0 : points_num;
if (points_num >= min_points_per_voxel) {
double mult = (points_num - 1.0) / points_num;
cov(0, 0) = ((cov(0, 0) - 2.0 * p0 * c0) / points_num + c0 * c0) * mult;
cov(0, 1) = ((cov(0, 1) - 2.0 * p0 * c1) / points_num + c0 * c1) * mult;
cov(0, 2) = ((cov(0, 2) - 2.0 * p0 * c2) / points_num + c0 * c2) * mult;
cov(1, 0) = cov(0, 1);
cov(1, 1) = ((cov(1, 1) - 2.0 * p1 * c1) / points_num + c1 * c1) * mult;
cov(1, 2) = ((cov(1, 2) - 2.0 * p1 * c2) / points_num + c1 * c2) * mult;
cov(2, 0) = cov(0, 2);
cov(2, 1) = cov(1, 2);
cov(2, 2) = ((cov(2, 2) - 2.0 * p2 * c2) / points_num + c2 * c2) * mult;
}
}
}
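/* Algebra behind the update above, writing Sxy for the accumulated sum of
 * x*y, Sx for the sum of x, n for points_num, and xm, ym for the means:
 *   ((Sxy - 2 * Sx * ym) / n + xm * ym) * (n - 1) / n
 *     = (E[xy] - xm * ym) * (n - 1) / n,
 * i.e. the (biased) covariance of the voxel's points with an extra
 * (n - 1) / n factor applied uniformly to every entry. */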
extern "C" __global__ void
computeInverseEigenvectors(double *inverse_covariance, int *points_per_voxel, int voxel_num, double *eigenvectors,
int min_points_per_voxel) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
eigen_vectors.inverse(icov);
}
__syncthreads();
}
}
//eigen_vecs = eigen_vecs * eigen_val
extern "C" __global__ void
updateCovarianceS0(int *points_per_voxel, int voxel_num, double *eigenvalues, double *eigenvectors,
int min_points_per_voxel) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
double eig_val0 = eigenvalues[vid];
double eig_val1 = eigenvalues[vid + voxel_num];
double eig_val2 = eigenvalues[vid + 2 * voxel_num];
eigen_vectors(0, 0) *= eig_val0;
eigen_vectors(1, 0) *= eig_val0;
eigen_vectors(2, 0) *= eig_val0;
eigen_vectors(0, 1) *= eig_val1;
eigen_vectors(1, 1) *= eig_val1;
eigen_vectors(2, 1) *= eig_val1;
eigen_vectors(0, 2) *= eig_val2;
eigen_vectors(1, 2) *= eig_val2;
eigen_vectors(2, 2) *= eig_val2;
}
__syncthreads();
}
}
//cov = new eigen_vecs * eigen_vecs transpose
extern "C" __global__ void
updateCovarianceS1(double *covariance, double *inverse_covariance, int *points_per_voxel, int voxel_num,
double *eigenvectors, int min_points_per_voxel, int col) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
MatrixDevice eigen_vectors(3, 3, voxel_num, eigenvectors + vid);
double tmp0 = icov(0, col);
double tmp1 = icov(1, col);
double tmp2 = icov(2, col);
cov(0, col) = eigen_vectors(0, 0) * tmp0 + eigen_vectors(0, 1) * tmp1 + eigen_vectors(0, 2) * tmp2;
cov(1, col) = eigen_vectors(1, 0) * tmp0 + eigen_vectors(1, 1) * tmp1 + eigen_vectors(1, 2) * tmp2;
cov(2, col) = eigen_vectors(2, 0) * tmp0 + eigen_vectors(2, 1) * tmp1 + eigen_vectors(2, 2) * tmp2;
}
__syncthreads();
}
}
extern "C" __global__ void
computeInverseCovariance(double *covariance, double *inverse_covariance, int *points_per_voxel, int voxel_num,
int min_points_per_voxel) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int vid = index; vid < voxel_num; vid += stride) {
if (points_per_voxel[vid] >= min_points_per_voxel) {
MatrixDevice cov(3, 3, voxel_num, covariance + vid);
MatrixDevice icov(3, 3, voxel_num, inverse_covariance + vid);
cov.inverse(icov);
}
__syncthreads();
}
}
template<typename T>
__global__ void init(T *input, int size, int local_size) {
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
for (int j = 0; j < local_size; j++)
input[i + j * size] = 1;
}
}
extern "C" __global__ void initBoolean(bool *input, int size) {
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) {
        input[i] = (i % 2 == 0);
}
}
/* Normalize input matrices to avoid overflow. */
extern "C" __global__ void
normalize(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
        if (points_per_voxel[id] >= min_points_per_voxel)
            sv.normalizeInput(id);
        __syncthreads();
}
}
/* Compute eigenvalues. Eigenvalues are arranged in increasing order
 * (eigen(0) <= eigen(1) <= eigen(2)). */
extern "C" __global__ void
computeEigenvalues(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvalues(id);
__syncthreads();
}
}
/* First step to compute eigenvector 0 of covariance matrices. */
extern "C" __global__ void
computeEvec00(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector00(id);
__syncthreads();
}
}
/* Second step to compute eigenvector 0 of covariance matrices. */
extern "C" __global__ void
computeEvec01(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector01(id);
__syncthreads();
}
}
/* First step to compute eigenvector 1 of covariance matrices. */
extern "C" __global__ void
computeEvec10(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector10(id);
__syncthreads();
}
}
/* Second step to compute eigenvector 1 of covariance matrices. */
extern "C" __global__ void
computeEvec11(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector11(id);
__syncthreads();
}
}
/* Compute eigenvector 2 of covariance matrices. */
extern "C" __global__ void
computeEvec2(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.computeEigenvector2(id);
__syncthreads();
}
}
/* Final step to compute eigenvalues. */
extern "C" __global__ void
updateEval(SymmetricEigensolver3x3 sv, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel)
sv.updateEigenvalues(id);
__syncthreads();
}
}
/* Update eigenvalues in case the covariance matrix is nearly singular. */
extern "C" __global__ void
updateEval2(double *eigenvalues, int *points_per_voxel, int voxel_num, int min_points_per_voxel) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < voxel_num; id += blockDim.x * gridDim.x) {
if (points_per_voxel[id] >= min_points_per_voxel) {
MatrixDevice eigen_val(3, 1, voxel_num, eigenvalues + id);
double ev0 = eigen_val(0);
double ev1 = eigen_val(1);
double ev2 = eigen_val(2);
if (ev0 < 0 || ev1 < 0 || ev2 <= 0) {
points_per_voxel[id] = 0;
continue;
}
double min_cov_eigvalue = ev2 * 0.01;
if (ev0 < min_cov_eigvalue) {
ev0 = min_cov_eigvalue;
if (ev1 < min_cov_eigvalue) {
ev1 = min_cov_eigvalue;
}
}
eigen_val(0) = ev0;
eigen_val(1) = ev1;
eigen_val(2) = ev2;
__syncthreads();
}
}
}
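/* Clamping sketch (illustrative): for eigenvalues (1e-9, 0.5, 2.0) the
 * threshold is 0.01 * 2.0 = 0.02, giving (0.02, 0.5, 2.0); bounding the
 * smallest eigenvalue relative to the largest keeps the covariance
 * well-conditioned before it is inverted. */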
void GVoxelGrid::computeCentroidAndCovariance() {
int block_x = (voxel_num_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : voxel_num_;
int grid_x = (voxel_num_ - 1) / block_x + 1;
initCentroidAndCovariance<<<grid_x, block_x>>>(x_, y_, z_, starting_point_ids_, point_ids_, centroid_,
covariance_, voxel_num_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
double *pt_sum;
checkCudaErrors(cudaMalloc(&pt_sum, sizeof(double) * voxel_num_ * 3));
checkCudaErrors(cudaMemcpy(pt_sum, centroid_, sizeof(double) * voxel_num_ * 3, cudaMemcpyDeviceToDevice));
updateVoxelCentroid<<<grid_x, block_x>>>(centroid_, points_per_voxel_, voxel_num_);
checkCudaErrors(cudaGetLastError());
updateVoxelCovariance<<<grid_x, block_x>>>(centroid_, pt_sum, covariance_, points_per_voxel_, voxel_num_,
min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(pt_sum));
double *eigenvalues_dev, *eigenvectors_dev;
checkCudaErrors(cudaMalloc(&eigenvalues_dev, sizeof(double) * 3 * voxel_num_));
checkCudaErrors(cudaMalloc(&eigenvectors_dev, sizeof(double) * 9 * voxel_num_));
    // Solve the eigenvalue/eigenvector problem on the GPU.
SymmetricEigensolver3x3 sv(voxel_num_);
sv.setInputMatrices(covariance_);
sv.setEigenvalues(eigenvalues_dev);
sv.setEigenvectors(eigenvectors_dev);
normalize<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEigenvalues<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec00<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec01<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec10<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec11<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeEvec2<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
updateEval<<<grid_x, block_x>>>(sv, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
updateEval2<<<grid_x, block_x>>>(eigenvalues_dev, points_per_voxel_, voxel_num_, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
computeInverseEigenvectors<<<grid_x, block_x>>>(inverse_covariance_, points_per_voxel_, voxel_num_,
eigenvectors_dev, min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
updateCovarianceS0<<<grid_x, block_x>>>(points_per_voxel_, voxel_num_, eigenvalues_dev, eigenvectors_dev,
min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
for (int i = 0; i < 3; i++) {
updateCovarianceS1<<<grid_x, block_x>>>(covariance_, inverse_covariance_, points_per_voxel_, voxel_num_,
eigenvectors_dev, min_points_per_voxel_, i);
checkCudaErrors(cudaGetLastError());
}
checkCudaErrors(cudaDeviceSynchronize());
computeInverseCovariance<<<grid_x, block_x>>>(covariance_, inverse_covariance_, points_per_voxel_, voxel_num_,
min_points_per_voxel_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
sv.memFree();
checkCudaErrors(cudaFree(eigenvalues_dev));
checkCudaErrors(cudaFree(eigenvectors_dev));
}
//Inputs are assumed to already be in device memory
void GVoxelGrid::setInput(float *x, float *y, float *z, int points_num) {
if (points_num <= 0)
return;
x_ = x;
y_ = y;
z_ = z;
points_num_ = points_num;
//std::cerr<<("GVoxelGrid 1")<<std::endl;
findBoundaries();
//std::cerr<<("GVoxelGrid 2")<<std::endl;
voxel_num_ = vgrid_x_ * vgrid_y_ * vgrid_z_;
//std::cerr<<("GVoxelGrid 3")<<std::endl;
initialize();
//std::cerr<<("GVoxelGrid 4")<<std::endl;
scatterPointsToVoxelGrid();
//std::cerr<<("GVoxelGrid 5")<<std::endl;
computeCentroidAndCovariance();
//std::cerr<<("GVoxelGrid 6")<<std::endl;
buildOctree();
//std::cerr<<("GVoxelGrid 7")<<std::endl;
}
/* Find the largest coordinate values */
extern "C" __global__ void findMax(float *x, float *y, float *z, int full_size, int half_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
x[i] = (i + half_size < full_size) ? ((x[i] >= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i];
y[i] = (i + half_size < full_size) ? ((y[i] >= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i];
z[i] = (i + half_size < full_size) ? ((z[i] >= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i];
}
}
/* Find the smallest coordinate values */
extern "C" __global__ void findMin(float *x, float *y, float *z, int full_size, int half_size) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < half_size; i += stride) {
x[i] = (i + half_size < full_size) ? ((x[i] <= x[i + half_size]) ? x[i] : x[i + half_size]) : x[i];
y[i] = (i + half_size < full_size) ? ((y[i] <= y[i + half_size]) ? y[i] : y[i + half_size]) : y[i];
z[i] = (i + half_size < full_size) ? ((z[i] <= z[i + half_size]) ? z[i] : z[i + half_size]) : z[i];
}
}
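/* Reduction sketch (illustrative): for 7 elements the pairwise folding uses
 * half sizes 4 -> 2 -> 1; e.g. a max-reduction over x = {5, 1, 9, 2, 7, 3, 4}
 * becomes {7, 3, 9, 2, ...} (index 3 has no partner and is kept), then
 * {9, 3, ...}, then {9, ...}, leaving the result in x[0]. */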
void GVoxelGrid::findBoundaries() {
float *max_x, *max_y, *max_z, *min_x, *min_y, *min_z;
checkCudaErrors(cudaMalloc(&max_x, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&max_y, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&max_z, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&min_x, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&min_y, sizeof(float) * points_num_));
checkCudaErrors(cudaMalloc(&min_z, sizeof(float) * points_num_));
checkCudaErrors(cudaMemcpy(max_x, x_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(max_y, y_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(max_z, z_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_x, x_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_y, y_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMemcpy(min_z, z_, sizeof(float) * points_num_, cudaMemcpyDeviceToDevice));
int points_num = points_num_;
while (points_num > 1) {
int half_points_num = (points_num - 1) / 2 + 1;
int block_x = (half_points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : half_points_num;
int grid_x = (half_points_num - 1) / block_x + 1;
findMax<<<grid_x, block_x>>>(max_x, max_y, max_z, points_num, half_points_num);
checkCudaErrors(cudaGetLastError());
findMin<<<grid_x, block_x>>>(min_x, min_y, min_z, points_num, half_points_num);
checkCudaErrors(cudaGetLastError());
points_num = half_points_num;
}
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&max_x_, max_x, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&max_y_, max_y, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&max_z_, max_z, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&min_x_, min_x, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&min_y_, min_y, sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&min_z_, min_z, sizeof(float), cudaMemcpyDeviceToHost));
max_b_x_ = static_cast<int> (floor(max_x_ / voxel_x_));
max_b_y_ = static_cast<int> (floor(max_y_ / voxel_y_));
max_b_z_ = static_cast<int> (floor(max_z_ / voxel_z_));
min_b_x_ = static_cast<int> (floor(min_x_ / voxel_x_));
min_b_y_ = static_cast<int> (floor(min_y_ / voxel_y_));
min_b_z_ = static_cast<int> (floor(min_z_ / voxel_z_));
vgrid_x_ = max_b_x_ - min_b_x_ + 1;
vgrid_y_ = max_b_y_ - min_b_y_ + 1;
vgrid_z_ = max_b_z_ - min_b_z_ + 1;
checkCudaErrors(cudaFree(max_x));
checkCudaErrors(cudaFree(max_y));
checkCudaErrors(cudaFree(max_z));
checkCudaErrors(cudaFree(min_x));
checkCudaErrors(cudaFree(min_y));
checkCudaErrors(cudaFree(min_z));
}
/* Find indexes idx, idy and idz of candidate voxels */
extern "C" __global__ void findBoundariesOfCandidateVoxels(float *x, float *y, float *z,
float radius, int points_num,
float voxel_x, float voxel_y, float voxel_z,
int max_b_x, int max_b_y, int max_b_z,
int min_b_x, int min_b_y, int min_b_z,
int *max_vid_x, int *max_vid_y, int *max_vid_z,
int *min_vid_x, int *min_vid_y, int *min_vid_z,
int *candidate_voxel_per_point) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
float t_x = x[i];
float t_y = y[i];
float t_z = z[i];
int max_id_x = static_cast<int>(floorf((t_x + radius) / voxel_x));
int max_id_y = static_cast<int>(floorf((t_y + radius) / voxel_y));
int max_id_z = static_cast<int>(floorf((t_z + radius) / voxel_z));
int min_id_x = static_cast<int>(floorf((t_x - radius) / voxel_x));
int min_id_y = static_cast<int>(floorf((t_y - radius) / voxel_y));
int min_id_z = static_cast<int>(floorf((t_z - radius) / voxel_z));
        /* Find the intersection of the cube containing
         * the NN sphere of the point with the voxel grid
         */
max_id_x = (max_id_x > max_b_x) ? max_b_x - min_b_x : max_id_x - min_b_x;
max_id_y = (max_id_y > max_b_y) ? max_b_y - min_b_y : max_id_y - min_b_y;
max_id_z = (max_id_z > max_b_z) ? max_b_z - min_b_z : max_id_z - min_b_z;
min_id_x = (min_id_x < min_b_x) ? 0 : min_id_x - min_b_x;
min_id_y = (min_id_y < min_b_y) ? 0 : min_id_y - min_b_y;
min_id_z = (min_id_z < min_b_z) ? 0 : min_id_z - min_b_z;
int vx = max_id_x - min_id_x + 1;
int vy = max_id_y - min_id_y + 1;
int vz = max_id_z - min_id_z + 1;
candidate_voxel_per_point[i] = (vx > 0 && vy > 0 && vz > 0) ? vx * vy * vz : 0;
max_vid_x[i] = max_id_x;
max_vid_y[i] = max_id_y;
max_vid_z[i] = max_id_z;
min_vid_x[i] = min_id_x;
min_vid_y[i] = min_id_y;
min_vid_z[i] = min_id_z;
}
}
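/* Sizing sketch (illustrative): with radius 1.0 and voxel size 0.5, the
 * unclipped range spans floor((t + 1.0) / 0.5) - floor((t - 1.0) / 0.5) + 1
 * = 5 voxels per axis, i.e. at most 5 * 5 * 5 = 125 candidates per point
 * before the intersection with the grid shrinks it. */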
/* Write the ids of valid points to the output buffer */
extern "C" __global__ void
collectValidPoints(int *valid_points_mark, int *valid_points_id, int *valid_points_location, int points_num) {
for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < points_num; index += blockDim.x * gridDim.x) {
if (valid_points_mark[index] != 0) {
valid_points_id[valid_points_location[index]] = index;
}
}
}
/* Compute the global index of candidate voxels.
 * global index = idx + idy * grid_size_x + idz * grid_size_x * grid_size_y */
extern "C" __global__ void updateCandidateVoxelIds(int points_num,
int vgrid_x, int vgrid_y, int vgrid_z,
int *max_vid_x, int *max_vid_y, int *max_vid_z,
int *min_vid_x, int *min_vid_y, int *min_vid_z,
int *starting_voxel_id,
int *candidate_voxel_id) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
int max_id_x = max_vid_x[i];
int max_id_y = max_vid_y[i];
int max_id_z = max_vid_z[i];
int min_id_x = min_vid_x[i];
int min_id_y = min_vid_y[i];
int min_id_z = min_vid_z[i];
int write_location = starting_voxel_id[i];
for (int j = min_id_x; j <= max_id_x; j++) {
for (int k = min_id_y; k <= max_id_y; k++) {
for (int l = min_id_z; l <= max_id_z; l++) {
candidate_voxel_id[write_location] = j + k * vgrid_x + l * vgrid_x * vgrid_y;
write_location++;
}
}
}
}
}
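/* Here starting_voxel_id is the exclusive-scanned candidate count buffer, so
 * every point owns a private, contiguous slice of candidate_voxel_id and the
 * writes need no synchronization. */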
/* Find out which voxels are really inside the radius.
 * This is done by comparing the distance between the centroid
 * of the voxel and the query point with the radius.
 *
 * The valid_voxel_mark stores the result of the inspection, which is 0
 * if the centroid is outside the radius and 1 otherwise.
 *
 * The valid_points_mark stores the status of the inspection per point.
 * It is 0 if no voxel in the candidate list is truly a neighbor
 * of the point, and 1 otherwise.
 *
 * The valid_voxel_count stores the number of true neighbor voxels per point.
 */
extern "C" __global__ void inspectCandidateVoxels(float *x, float *y, float *z,
float radius, int max_nn, int points_num,
double *centroid, int *points_per_voxel, int offset,
int *starting_voxel_id, int *candidate_voxel_id,
int *valid_voxel_mark, int *valid_voxel_count,
int *valid_points_mark) {
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = id; i < points_num; i += stride) {
double t_x = static_cast<double>(x[i]);
double t_y = static_cast<double>(y[i]);
double t_z = static_cast<double>(z[i]);
int nn = 0;
for (int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1] && nn <= max_nn; j++) {
int point_num = points_per_voxel[candidate_voxel_id[j]];
MatrixDevice centr(3, 1, offset, centroid + candidate_voxel_id[j]);
double centroid_x = (point_num > 0) ? (t_x - centr(0)) : radius + 1;
double centroid_y = (point_num > 0) ? (t_y - centr(1)) : 0;
double centroid_z = (point_num > 0) ? (t_z - centr(2)) : 0;
bool res = (norm3d(centroid_x, centroid_y, centroid_z) <= radius);
valid_voxel_mark[j] = (res) ? 1 : 0;
nn += (res) ? 1 : 0;
}
valid_voxel_count[i] = nn;
valid_points_mark[i] = (nn > 0) ? 1 : 0;
__syncthreads();
}
}
/* Write the ids of valid voxels to the output buffer */
extern "C" __global__ void
collectValidVoxels(int *valid_voxels_mark, int *candidate_voxel_id, int *output, int *writing_location,
int candidate_voxel_num) {
for (int index = threadIdx.x + blockIdx.x * blockDim.x;
index < candidate_voxel_num; index += blockDim.x * gridDim.x) {
if (valid_voxels_mark[index] == 1) {
output[writing_location[index]] = candidate_voxel_id[index];
}
}
}
/* Write the number of valid voxels per point to the output buffer */
extern "C" __global__ void
collectValidVoxelCount(int *input_valid_voxel_count, int *output_valid_voxel_count, int *writing_location,
int points_num) {
for (int id = threadIdx.x + blockIdx.x * blockDim.x; id < points_num; id += blockDim.x * gridDim.x) {
if (input_valid_voxel_count[id] != 0)
output_valid_voxel_count[writing_location[id]] = input_valid_voxel_count[id];
}
}
template<typename T>
void GVoxelGrid::ExclusiveScan(T *input, int ele_num, T *sum) {
thrust::device_ptr <T> dev_ptr(input);
thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr);
checkCudaErrors(cudaDeviceSynchronize());
*sum = *(dev_ptr + ele_num - 1);
}
template<typename T>
void GVoxelGrid::ExclusiveScan(T *input, int ele_num) {
thrust::device_ptr <T> dev_ptr(input);
thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr);
checkCudaErrors(cudaDeviceSynchronize());
}
void GVoxelGrid::radiusSearch(float *qx, float *qy, float *qz, int points_num, float radius, int max_nn,
int **valid_points, int **starting_voxel_id, int **valid_voxel_id,
int *valid_voxel_num, int *valid_points_num) {
    //Launch configuration for the input query points
int block_x = (points_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num;
int grid_x = (points_num - 1) / block_x + 1;
    //Boundaries of candidate voxels per point
int *max_vid_x, *max_vid_y, *max_vid_z;
int *min_vid_x, *min_vid_y, *min_vid_z;
checkCudaErrors(cudaMalloc(&max_vid_x, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&max_vid_y, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&max_vid_z, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&min_vid_x, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&min_vid_y, sizeof(int) * points_num));
checkCudaErrors(cudaMalloc(&min_vid_z, sizeof(int) * points_num));
    //Determine the number of candidate voxels per point
int *candidate_voxel_num_per_point;
int total_candidate_voxel_num;
checkCudaErrors(cudaMalloc(&candidate_voxel_num_per_point, sizeof(int) * (points_num + 1)));
findBoundariesOfCandidateVoxels<<<grid_x, block_x>>>(qx, qy, qz, radius, points_num,
voxel_x_, voxel_y_, voxel_z_,
max_b_x_, max_b_y_, max_b_z_,
min_b_x_, min_b_y_, min_b_z_,
max_vid_x, max_vid_y, max_vid_z,
min_vid_x, min_vid_y, min_vid_z,
candidate_voxel_num_per_point);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//Total candidate voxel num is determined by an exclusive scan on candidate_voxel_num_per_point
ExclusiveScan(candidate_voxel_num_per_point, points_num + 1, &total_candidate_voxel_num);
if (total_candidate_voxel_num <= 0) {
std::cout << "No candidate voxel was found. Exiting..." << std::endl;
checkCudaErrors(cudaFree(max_vid_x));
checkCudaErrors(cudaFree(max_vid_y));
checkCudaErrors(cudaFree(max_vid_z));
checkCudaErrors(cudaFree(min_vid_x));
checkCudaErrors(cudaFree(min_vid_y));
checkCudaErrors(cudaFree(min_vid_z));
checkCudaErrors(cudaFree(candidate_voxel_num_per_point));
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
return;
}
//Determine the voxel id of candidate voxels
int *candidate_voxel_id;
checkCudaErrors(cudaMalloc(&candidate_voxel_id, sizeof(int) * total_candidate_voxel_num));
updateCandidateVoxelIds<<<grid_x, block_x>>>(points_num, vgrid_x_, vgrid_y_, vgrid_z_,
max_vid_x, max_vid_y, max_vid_z,
min_vid_x, min_vid_y, min_vid_z,
candidate_voxel_num_per_point, candidate_voxel_id);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//Go through the candidate voxel id list and find out which voxels are really inside the radius
int *valid_voxel_mark;
checkCudaErrors(cudaMalloc(&valid_voxel_mark, sizeof(int) * total_candidate_voxel_num));
int *valid_voxel_count;
checkCudaErrors(cudaMalloc(&valid_voxel_count, sizeof(int) * (points_num + 1)));
int *valid_points_mark;
checkCudaErrors(cudaMalloc(&valid_points_mark, sizeof(int) * points_num));
block_x = (total_candidate_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : total_candidate_voxel_num;
grid_x = (total_candidate_voxel_num - 1) / block_x + 1;
    //TODO: Check the valid voxel count again
inspectCandidateVoxels<<<grid_x, block_x>>>(qx, qy, qz, radius, max_nn, points_num,
centroid_, points_per_voxel_, voxel_num_,
candidate_voxel_num_per_point, candidate_voxel_id,
valid_voxel_mark, valid_voxel_count, valid_points_mark);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
//Collect valid points
int *valid_points_location;
checkCudaErrors(cudaMalloc(&valid_points_location, sizeof(int) * (points_num + 1)));
checkCudaErrors(cudaMemset(valid_points_location, 0, sizeof(int) * (points_num + 1)));
checkCudaErrors(cudaMemcpy(valid_points_location, valid_points_mark, sizeof(int) * points_num,
cudaMemcpyDeviceToDevice));
    //Writing locations in the output buffer are determined by an exclusive scan
ExclusiveScan(valid_points_location, points_num + 1, valid_points_num);
if (*valid_points_num <= 0) {
//std::cout << "No valid point was found. Exiting..." << std::endl;
std::cout << "No valid point was found. Exiting...: " << *valid_points_num << std::endl;
checkCudaErrors(cudaFree(max_vid_x));
checkCudaErrors(cudaFree(max_vid_y));
checkCudaErrors(cudaFree(max_vid_z));
checkCudaErrors(cudaFree(min_vid_x));
checkCudaErrors(cudaFree(min_vid_y));
checkCudaErrors(cudaFree(min_vid_z));
checkCudaErrors(cudaFree(candidate_voxel_num_per_point));
checkCudaErrors(cudaFree(candidate_voxel_id));
checkCudaErrors(cudaFree(valid_voxel_mark));
checkCudaErrors(cudaFree(valid_voxel_count));
checkCudaErrors(cudaFree(valid_points_mark));
checkCudaErrors(cudaFree(valid_points_location));
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
*valid_points_num = 0;
return;
}
checkCudaErrors(cudaMalloc(valid_points, sizeof(int) * (*valid_points_num)));
collectValidPoints<<<grid_x, block_x>>>(valid_points_mark, *valid_points, valid_points_location, points_num);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMalloc(starting_voxel_id, sizeof(int) * (*valid_points_num + 1)));
collectValidVoxelCount<<<grid_x, block_x>>>(valid_voxel_count, *starting_voxel_id, valid_points_location,
points_num);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
    //Determine the starting location of voxels per point in the valid points list
ExclusiveScan(*starting_voxel_id, *valid_points_num + 1, valid_voxel_num);
//Collect valid voxels
int *valid_voxel_location;
checkCudaErrors(cudaMalloc(&valid_voxel_location, sizeof(int) * (total_candidate_voxel_num + 1)));
checkCudaErrors(cudaMemcpy(valid_voxel_location, valid_voxel_mark, sizeof(int) * total_candidate_voxel_num,
cudaMemcpyDeviceToDevice));
ExclusiveScan(valid_voxel_location, total_candidate_voxel_num + 1, valid_voxel_num);
if (*valid_voxel_num <= 0) {
checkCudaErrors(cudaFree(max_vid_x));
max_vid_x = NULL;
checkCudaErrors(cudaFree(max_vid_y));
max_vid_y = NULL;
checkCudaErrors(cudaFree(max_vid_z));
max_vid_z = NULL;
checkCudaErrors(cudaFree(min_vid_x));
min_vid_x = NULL;
checkCudaErrors(cudaFree(min_vid_y));
min_vid_y = NULL;
checkCudaErrors(cudaFree(min_vid_z));
min_vid_z = NULL;
checkCudaErrors(cudaFree(candidate_voxel_num_per_point));
candidate_voxel_num_per_point = NULL;
checkCudaErrors(cudaFree(candidate_voxel_id));
candidate_voxel_id = NULL;
checkCudaErrors(cudaFree(valid_voxel_mark));
valid_voxel_mark = NULL;
checkCudaErrors(cudaFree(valid_voxel_count));
valid_voxel_count = NULL;
checkCudaErrors(cudaFree(valid_points_mark));
valid_points_mark = NULL;
checkCudaErrors(cudaFree(valid_points_location));
valid_points_location = NULL;
checkCudaErrors(cudaFree(valid_voxel_location));
valid_voxel_location = NULL;
valid_points = NULL;
starting_voxel_id = NULL;
valid_voxel_id = NULL;
*valid_voxel_num = 0;
        *valid_points_num = 0;
        return;
    }
checkCudaErrors(cudaMalloc(valid_voxel_id, sizeof(int) * (*valid_voxel_num)));
block_x = (total_candidate_voxel_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : total_candidate_voxel_num;
grid_x = (total_candidate_voxel_num - 1) / block_x + 1;
collectValidVoxels<<<grid_x, block_x>>>(valid_voxel_mark, candidate_voxel_id, *valid_voxel_id,
valid_voxel_location, total_candidate_voxel_num);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(max_vid_x));
checkCudaErrors(cudaFree(max_vid_y));
checkCudaErrors(cudaFree(max_vid_z));
checkCudaErrors(cudaFree(min_vid_x));
checkCudaErrors(cudaFree(min_vid_y));
checkCudaErrors(cudaFree(min_vid_z));
checkCudaErrors(cudaFree(candidate_voxel_num_per_point));
checkCudaErrors(cudaFree(candidate_voxel_id));
checkCudaErrors(cudaFree(valid_voxel_mark));
checkCudaErrors(cudaFree(valid_points_mark));
checkCudaErrors(cudaFree(valid_voxel_count));
checkCudaErrors(cudaFree(valid_points_location));
checkCudaErrors(cudaFree(valid_voxel_location));
}
/* Build parent nodes from child nodes of the octree */
extern "C" __global__ void buildParent(double *child_centroids, int *points_per_child,
int child_grid_x, int child_grid_y, int child_grid_z, int child_num,
double *parent_centroids, int *points_per_parent,
int parent_grid_x, int parent_grid_y, int parent_grid_z) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
int idz = threadIdx.z + blockIdx.z * blockDim.z;
if (idx < parent_grid_x && idy < parent_grid_y && idz < parent_grid_z) {
int parent_idx = idx + idy * parent_grid_x + idz * parent_grid_x * parent_grid_y;
MatrixDevice parent_centr(3, 1, parent_grid_x * parent_grid_y * parent_grid_z,
parent_centroids + parent_idx);
double pc0, pc1, pc2;
int points_num = 0;
double dpoints_num;
pc0 = 0.0;
pc1 = 0.0;
pc2 = 0.0;
for (int i = idx * 2; i < idx * 2 + 2 && i < child_grid_x; i++) {
for (int j = idy * 2; j < idy * 2 + 2 && j < child_grid_y; j++) {
for (int k = idz * 2; k < idz * 2 + 2 && k < child_grid_z; k++) {
int child_idx = i + j * child_grid_x + k * child_grid_x * child_grid_y;
MatrixDevice child_centr(3, 1, child_num, child_centroids + child_idx);
int child_points = points_per_child[child_idx];
double dchild_points = static_cast<double>(child_points);
pc0 += (child_points > 0) ? dchild_points * child_centr(0) : 0.0;
pc1 += (child_points > 0) ? dchild_points * child_centr(1) : 0.0;
pc2 += (child_points > 0) ? dchild_points * child_centr(2) : 0.0;
points_num += (child_points > 0) ? child_points : 0;
// (no barrier here: the loop bounds are thread-dependent, so __syncthreads() would be illegal in divergent code, and the kernel uses no shared memory anyway)
}
}
}
dpoints_num = static_cast<double>(points_num);
parent_centr(0) = (points_num <= 0) ? DBL_MAX : pc0 / dpoints_num;
parent_centr(1) = (points_num <= 0) ? DBL_MAX : pc1 / dpoints_num;
parent_centr(2) = (points_num <= 0) ? DBL_MAX : pc2 / dpoints_num;
points_per_parent[parent_idx] = points_num;
}
}
/* Compute the number of points per voxel using atomicAdd */
extern "C" __global__ void insertPointsToGrid(float *x, float *y, float *z, int points_num,
int *points_per_voxel, int voxel_num,
int vgrid_x, int vgrid_y, int vgrid_z,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < points_num; i += stride) {
float t_x = x[i];
float t_y = y[i];
float t_z = z[i];
int voxel_id = voxelId(t_x, t_y, t_z, voxel_x, voxel_y, voxel_z, min_b_x, min_b_y, min_b_z, vgrid_x,
vgrid_y, vgrid_z);
// Update number of points in the voxel
int ptr_increment = (voxel_id < voxel_num) * voxel_id; // if (voxel_id < voxel_num), then use voxel_id
int incremental_value = (voxel_id < voxel_num);
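// Branchless guard: out-of-range points atomically add 0 to slot 0 instead of diverging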
//atomicAdd(points_per_voxel + voxel_id, 1);
atomicAdd(points_per_voxel + ptr_increment, incremental_value);
}
}
/* Rearrange points to locations corresponding to voxels */
extern "C" __global__ void scatterPointsToVoxels(float *x, float *y, float *z, int points_num, int voxel_num,
float voxel_x, float voxel_y, float voxel_z,
int min_b_x, int min_b_y, int min_b_z,
int vgrid_x, int vgrid_y, int vgrid_z,
int *writing_locations, int *point_ids) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
int voxel_id = voxelId(x[i], y[i], z[i], voxel_x, voxel_y, voxel_z,
min_b_x, min_b_y, min_b_z, vgrid_x, vgrid_y, vgrid_z);
int ptr_increment = (voxel_id < voxel_num) * voxel_id;
int incremental_value = (voxel_id < voxel_num);
//int loc = atomicAdd(writing_locations + voxel_id, 1);
int loc = atomicAdd(writing_locations + ptr_increment, incremental_value);
// Record only in-range points; for out-of-range ones loc would alias voxel 0's slot
if (incremental_value) {
point_ids[loc] = i;
}
}
}
void GVoxelGrid::scatterPointsToVoxelGrid() {
if (starting_point_ids_ != NULL) {
checkCudaErrors(cudaFree(starting_point_ids_));
starting_point_ids_ = NULL;
}
if (point_ids_ != NULL) {
checkCudaErrors(cudaFree(point_ids_));
point_ids_ = NULL;
}
int block_x = (points_num_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : points_num_;
int grid_x = (points_num_ - 1) / block_x + 1;
insertPointsToGrid<<<grid_x, block_x>>>(x_, y_, z_, points_num_, points_per_voxel_, voxel_num_,
vgrid_x_, vgrid_y_, vgrid_z_,
voxel_x_, voxel_y_, voxel_z_,
min_b_x_, min_b_y_, min_b_z_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMalloc(&starting_point_ids_, sizeof(int) * (voxel_num_ + 1)));
int *writing_location;
checkCudaErrors(cudaMalloc(&writing_location, sizeof(int) * voxel_num_));
checkCudaErrors(
cudaMemcpy(starting_point_ids_, points_per_voxel_, sizeof(int) * voxel_num_, cudaMemcpyDeviceToDevice));
ExclusiveScan(starting_point_ids_, voxel_num_ + 1);
checkCudaErrors(
cudaMemcpy(writing_location, starting_point_ids_, sizeof(int) * voxel_num_, cudaMemcpyDeviceToDevice));
checkCudaErrors(cudaMalloc(&point_ids_, sizeof(int) * points_num_));
scatterPointsToVoxels<<<grid_x, block_x>>>(x_, y_, z_, points_num_, voxel_num_,
voxel_x_, voxel_y_, voxel_z_,
min_b_x_, min_b_y_, min_b_z_,
vgrid_x_, vgrid_y_, vgrid_z_,
writing_location, point_ids_);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(writing_location));
}
void GVoxelGrid::buildOctree() {
for (unsigned int i = 1; i < octree_centroids_.size(); i++) {
if (octree_centroids_[i] != NULL) {
checkCudaErrors(cudaFree(octree_centroids_[i]));
octree_centroids_[i] = NULL;
}
if (octree_points_per_node_[i] != NULL) {
checkCudaErrors(cudaFree(octree_points_per_node_[i]));
octree_points_per_node_[i] = NULL;
}
}
octree_centroids_.clear();
octree_points_per_node_.clear();
octree_grid_size_.clear();
//Push leafs to the octree list
octree_centroids_.push_back(centroid_);
octree_points_per_node_.push_back(points_per_voxel_);
OctreeGridSize grid_size;
grid_size.size_x = vgrid_x_;
grid_size.size_y = vgrid_y_;
grid_size.size_z = vgrid_z_;
octree_grid_size_.push_back(grid_size);
int node_number = voxel_num_;
int child_grid_x, child_grid_y, child_grid_z;
int parent_grid_x, parent_grid_y, parent_grid_z;
int i = 0;
while (node_number > 8) {
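// Build successively coarser levels until the top level holds at most 8 nodes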
//std::cerr<<"while: "<<node_number<<std::endl;
child_grid_x = octree_grid_size_[i].size_x;
child_grid_y = octree_grid_size_[i].size_y;
child_grid_z = octree_grid_size_[i].size_z;
parent_grid_x = (child_grid_x - 1) / 2 + 1;
parent_grid_y = (child_grid_y - 1) / 2 + 1;
parent_grid_z = (child_grid_z - 1) / 2 + 1;
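// Each parent level halves every grid dimension, rounding up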
node_number = parent_grid_x * parent_grid_y * parent_grid_z;
double *parent_centroids;
int *points_per_parent;
//std::cerr<<"1 "<<std::endl;
checkCudaErrors(cudaMalloc(&parent_centroids, sizeof(double) * 3 * node_number));
checkCudaErrors(cudaMalloc(&points_per_parent, sizeof(int) * node_number));
//std::cerr<<"2 "<<std::endl;
double *child_centroids = octree_centroids_[i];
int *points_per_child = octree_points_per_node_[i];
int block_x = (parent_grid_x > BLOCK_X) ? BLOCK_X : parent_grid_x;
int block_y = (parent_grid_y > BLOCK_Y) ? BLOCK_Y : parent_grid_y;
int block_z = (parent_grid_z > BLOCK_Z) ? BLOCK_Z : parent_grid_z;
//std::cerr<<"3 "<<std::endl;
int grid_x = (parent_grid_x - 1) / block_x + 1;
int grid_y = (parent_grid_y - 1) / block_y + 1;
int grid_z = (parent_grid_z - 1) / block_z + 1;
dim3 block(block_x, block_y, block_z);
dim3 grid(grid_x, grid_y, grid_z);
//std::cerr<<"4 "<<std::endl;
buildParent<<<grid, block>>>(child_centroids, points_per_child,
child_grid_x, child_grid_y, child_grid_z,
child_grid_x * child_grid_y * child_grid_z,
parent_centroids, points_per_parent,
parent_grid_x, parent_grid_y, parent_grid_z);
//std::cerr<<"5 "<<std::endl;
checkCudaErrors(cudaGetLastError());
octree_centroids_.push_back(parent_centroids);
octree_points_per_node_.push_back(points_per_parent);
grid_size.size_x = parent_grid_x;
grid_size.size_y = parent_grid_y;
grid_size.size_z = parent_grid_z;
octree_grid_size_.push_back(grid_size);
//std::cerr<<"6 "<<std::endl;
i++;
//std::cerr<<"end: "<<node_number<<std::endl;
}
//std::cerr<<"buildOctree 1 "<<std::endl;
checkCudaErrors(cudaDeviceSynchronize());
//std::cerr<<"buildOctree 2 "<<std::endl;
}
/* Search for the nearest octree node */
extern "C" __global__ void nearestOctreeNodeSearch(float *x, float *y, float *z,
int *vid_x, int *vid_y, int *vid_z,
int points_num,
double *centroids, int *points_per_node,
int vgrid_x, int vgrid_y, int vgrid_z, int node_num) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
int vx = vid_x[i];
int vy = vid_y[i];
int vz = vid_z[i];
double min_dist = DBL_MAX;
double t_x = static_cast<double>(x[i]);
double t_y = static_cast<double>(y[i]);
double t_z = static_cast<double>(z[i]);
double cur_dist;
int out_x, out_y, out_z;
out_x = vx;
out_y = vy;
out_z = vz;
double tmp_x, tmp_y, tmp_z;
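// Children of the node chosen at the previous (coarser) level sit at indices [2v, 2v + 1]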
for (int j = vx * 2; j < vx * 2 + 2 && j < vgrid_x; j++) {
for (int k = vy * 2; k < vy * 2 + 2 && k < vgrid_y; k++) {
for (int l = vz * 2; l < vz * 2 + 2 && l < vgrid_z; l++) {
int node_id = j + k * vgrid_x + l * vgrid_x * vgrid_y;
MatrixDevice node_centr(3, 1, node_num, centroids + node_id);
int points = points_per_node[node_id];
tmp_x = (points > 0) ? node_centr(0) - t_x : DBL_MAX;
tmp_y = (points > 0) ? node_centr(1) - t_y : 0.0;
tmp_z = (points > 0) ? node_centr(2) - t_z : 0.0;
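// Empty nodes get a DBL_MAX x-component, so their distance can never win the comparison below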
cur_dist = norm3d(tmp_x, tmp_y, tmp_z);
bool res = (cur_dist < min_dist);
out_x = (res) ? j : out_x;
out_y = (res) ? k : out_y;
out_z = (res) ? l : out_z;
min_dist = (res) ? cur_dist : min_dist;
}
}
}
vid_x[i] = out_x;
vid_y[i] = out_y;
vid_z[i] = out_z;
}
}
/* Search for the nearest point from nearest voxel */
extern "C" __global__ void nearestPointSearch(float *qx, float *qy, float *qz, int qpoints_num,
float *rx, float *ry, float *rz, int rpoints_num,
int *vid_x, int *vid_y, int *vid_z,
int vgrid_x, int vgrid_y, int vgrid_z, int voxel_num,
int *starting_point_id, int *point_id, double *min_distance) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < qpoints_num; i += stride) {
int voxel_id = vid_x[i] + vid_y[i] * vgrid_x + vid_z[i] * vgrid_x * vgrid_y;
float cor_qx = qx[i];
float cor_qy = qy[i];
float cor_qz = qz[i];
float min_dist = FLT_MAX;
for (int j = starting_point_id[voxel_id]; j < starting_point_id[voxel_id + 1]; j++) {
int pid = point_id[j];
float cor_rx = rx[pid];
float cor_ry = ry[pid];
float cor_rz = rz[pid];
cor_rx -= cor_qx;
cor_ry -= cor_qy;
cor_rz -= cor_qz;
min_dist = fminf(norm3df(cor_rx, cor_ry, cor_rz), min_dist);
}
min_distance[i] = static_cast<double>(min_dist);
}
}
/* Verify if min distances are really smaller than or equal to max_range */
extern "C" __global__ void
verifyDistances(int *valid_distance, double *min_distance, double max_range, int points_num) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for (int i = idx; i < points_num; i += stride) {
bool check = (min_distance[i] <= max_range);
valid_distance[i] = (check) ? 1 : 0;
if (!check) {
min_distance[i] = 0;
}
}
}
void GVoxelGrid::nearestNeighborSearch(float *trans_x, float *trans_y, float *trans_z, int point_num,
int *valid_distance, double *min_distance, float max_range) {
int *vid_x, *vid_y, *vid_z;
checkCudaErrors(cudaMalloc(&vid_x, sizeof(int) * point_num));
checkCudaErrors(cudaMalloc(&vid_y, sizeof(int) * point_num));
checkCudaErrors(cudaMalloc(&vid_z, sizeof(int) * point_num));
checkCudaErrors(cudaMemset(vid_x, 0, sizeof(int) * point_num));
checkCudaErrors(cudaMemset(vid_y, 0, sizeof(int) * point_num));
checkCudaErrors(cudaMemset(vid_z, 0, sizeof(int) * point_num));
checkCudaErrors(cudaDeviceSynchronize());
int block_x = (point_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : point_num;
int grid_x = (point_num - 1) / block_x + 1;
// Go through top of the octree to the bottom
for (int i = octree_centroids_.size() - 1; i >= 0; i--) {
double *centroids = octree_centroids_[i];
int *points_per_node = octree_points_per_node_[i];
int vgrid_x = octree_grid_size_[i].size_x;
int vgrid_y = octree_grid_size_[i].size_y;
int vgrid_z = octree_grid_size_[i].size_z;
int node_num = vgrid_x * vgrid_y * vgrid_z;
nearestOctreeNodeSearch<<<grid_x, block_x>>>(trans_x, trans_y, trans_z,
vid_x, vid_y, vid_z,
point_num,
centroids, points_per_node,
vgrid_x, vgrid_y, vgrid_z, node_num);
checkCudaErrors(cudaGetLastError());
}
nearestPointSearch<<<grid_x, block_x>>>(trans_x, trans_y, trans_z, point_num,
x_, y_, z_, points_num_,
vid_x, vid_y, vid_z,
vgrid_x_, vgrid_y_, vgrid_z_, voxel_num_,
starting_point_ids_, point_ids_,
min_distance);
checkCudaErrors(cudaGetLastError());
verifyDistances<<<grid_x, block_x>>>(valid_distance, min_distance, max_range, point_num);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaFree(vid_x));
checkCudaErrors(cudaFree(vid_y));
checkCudaErrors(cudaFree(vid_z));
}
}
|
6d811d4e828ab78b7f47661cb53cda4258b86a60.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <hip/hip_runtime.h>
#include "filtering_cuda.cuh"
#include "../filtering.h"
#include "../image_buffer.h"
#include "../image_exception.h"
#include "../parameter_validation.h"
namespace Image_Function_Cuda
{
Image Gaussian( const Image & in, uint32_t kernelSize, float sigma )
{
Image_Function::ParameterValidation( in );
ImageCuda out( in.width(), in.height() );
Gaussian( in, out, kernelSize, sigma );
return out;
}
void Gaussian( const Image & in, Image & out, uint32_t kernelSize, float sigma )
{
Image_Function::ParameterValidation( in, out );
if( sigma < 0 )
throw imageException( "Sigma value cannot be negative" );
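// Gaussian blur via FFT: spatial convolution becomes pointwise multiplication in the frequency domain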
FFT_Cuda::ComplexData image( in );
FFT_Cuda::ComplexData filter = GetGaussianKernel( in.width(), in.height(), kernelSize, sigma );
FFT_Cuda::FFTExecutor executor( in.width(), in.height() );
executor.directTransform( image );
executor.directTransform( filter );
executor.complexMultiplication( image, filter, image );
executor.inverseTransform( image );
out = image.get();
}
FFT_Cuda::ComplexData GetGaussianKernel( uint32_t width, uint32_t height, uint32_t kernelSize, float sigma )
{
std::vector<float> data;
Image_Function::GetGaussianKernel( data, width, height, kernelSize, sigma );
multiCuda::Array<float> cudaData( data );
FFT_Cuda::ComplexData complexData;
complexData.resize( width, height );
complexData.set( cudaData );
return complexData;
}
}
| 6d811d4e828ab78b7f47661cb53cda4258b86a60.cu | #include <cmath>
#include <cuda_runtime.h>
#include "filtering_cuda.cuh"
#include "../filtering.h"
#include "../image_buffer.h"
#include "../image_exception.h"
#include "../parameter_validation.h"
namespace Image_Function_Cuda
{
Image Gaussian( const Image & in, uint32_t kernelSize, float sigma )
{
Image_Function::ParameterValidation( in );
ImageCuda out( in.width(), in.height() );
Gaussian( in, out, kernelSize, sigma );
return out;
}
void Gaussian( const Image & in, Image & out, uint32_t kernelSize, float sigma )
{
Image_Function::ParameterValidation( in, out );
if( sigma < 0 )
throw imageException( "Sigma value cannot be negative" );
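// Gaussian blur via FFT: spatial convolution becomes pointwise multiplication in the frequency domain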
FFT_Cuda::ComplexData image( in );
FFT_Cuda::ComplexData filter = GetGaussianKernel( in.width(), in.height(), kernelSize, sigma );
FFT_Cuda::FFTExecutor executor( in.width(), in.height() );
executor.directTransform( image );
executor.directTransform( filter );
executor.complexMultiplication( image, filter, image );
executor.inverseTransform( image );
out = image.get();
}
FFT_Cuda::ComplexData GetGaussianKernel( uint32_t width, uint32_t height, uint32_t kernelSize, float sigma )
{
std::vector<float> data;
Image_Function::GetGaussianKernel( data, width, height, kernelSize, sigma );
multiCuda::Array<float> cudaData( data );
FFT_Cuda::ComplexData complexData;
complexData.resize( width, height );
complexData.set( cudaData );
return complexData;
}
}
|
209b8cf1835c1059ca3f03c4b9a45e26c0017e3e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_bcnn_forward_depthwise_conv_weight_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char *p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nthreads = 1;
float *src_data = NULL;
hipMalloc(&src_data, XSIZE*YSIZE);
float *weight_data = NULL;
hipMalloc(&weight_data, XSIZE*YSIZE);
int channels = 1;
int dst_h = 1;
int dst_w = 1;
int src_h = 1;
int src_w = 1;
int kernel_sz = 1;
int stride = 2;
int pad = 2;
float *dst_data = NULL;
hipMalloc(&dst_data, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
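// Round the problem size up to the next multiple of the block dimensions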
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((_bcnn_forward_depthwise_conv_weight_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, nthreads, src_data, weight_data, channels, dst_h, dst_w, src_h, src_w, kernel_sz, stride, pad, dst_data);
hipDeviceSynchronize();
// Warm-up launches so the timed loop below measures steady-state behaviour
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((_bcnn_forward_depthwise_conv_weight_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, nthreads, src_data, weight_data, channels, dst_h, dst_w, src_h, src_w, kernel_sz, stride, pad, dst_data);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((_bcnn_forward_depthwise_conv_weight_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, nthreads, src_data, weight_data, channels, dst_h, dst_w, src_h, src_w, kernel_sz, stride, pad, dst_data);
}
// NB: kernel launches are asynchronous; without a sync here the tail of the last launches is not timed
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 209b8cf1835c1059ca3f03c4b9a45e26c0017e3e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_bcnn_forward_depthwise_conv_weight_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char *p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int nthreads = 1;
float *src_data = NULL;
cudaMalloc(&src_data, XSIZE*YSIZE);
float *weight_data = NULL;
cudaMalloc(&weight_data, XSIZE*YSIZE);
int channels = 1;
int dst_h = 1;
int dst_w = 1;
int src_h = 1;
int src_w = 1;
int kernel_sz = 1;
int stride = 2;
int pad = 2;
float *dst_data = NULL;
cudaMalloc(&dst_data, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
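// Round the problem size up to the next multiple of the block dimensions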
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_bcnn_forward_depthwise_conv_weight_kernel<<<gridBlock,threadBlock>>>(nthreads,src_data,weight_data,channels,dst_h,dst_w,src_h,src_w,kernel_sz,stride,pad,dst_data);
cudaDeviceSynchronize();
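// Warm-up launches so the timed loop below measures steady-state behaviour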
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_bcnn_forward_depthwise_conv_weight_kernel<<<gridBlock,threadBlock>>>(nthreads,src_data,weight_data,channels,dst_h,dst_w,src_h,src_w,kernel_sz,stride,pad,dst_data);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_bcnn_forward_depthwise_conv_weight_kernel<<<gridBlock,threadBlock>>>(nthreads,src_data,weight_data,channels,dst_h,dst_w,src_h,src_w,kernel_sz,stride,pad,dst_data);
}
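// NB: kernel launches are asynchronous; without a sync here the tail of the last launches is not timed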
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d47ee9c1489c8ca7fa727bec852c48f0641b539a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.h"
#include <io/utilities/block_utils.cuh>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
struct dict_state_s
{
uint32_t row_cnt;
PageFragment *cur_fragment;
uint32_t *hashmap;
uint32_t total_dict_entries; //!< Total number of entries in dictionary
uint32_t dictionary_size; //!< Total dictionary size in bytes
uint32_t num_dict_entries; //!< Dictionary entries in current fragment to add
uint32_t frag_dict_size;
EncColumnChunk ck;
EncColumnDesc col;
PageFragment frag;
volatile uint32_t scratch_red[32];
uint16_t frag_dict[MAX_PAGE_FRAGMENT_SIZE];
};
/**
* @brief Computes a 16-bit dictionary hash
**/
inline __device__ uint32_t uint32_hash16(uint32_t v)
{
return (v + (v >> 16)) & 0xffff;
}
inline __device__ uint32_t uint64_hash16(uint64_t v)
{
return uint32_hash16((uint32_t)(v + (v >> 32)));
}
inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len)
{
uint32_t hash = len;
if (len > 0) {
uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p);
const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p);
uint32_t ofs = align_p * 8;
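// Consume 32 bits per iteration; __funnelshift_r stitches adjacent words to handle unaligned input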
uint32_t v;
while (len > 4) {
v = *p32++;
if (ofs) {
v = __funnelshift_r(v, *p32, ofs);
}
hash = __funnelshift_l(hash, hash, 5) + v;
len -= 4;
}
v = *p32;
if (ofs) {
v = __funnelshift_r(v, (ofs + len > 4) ? p32[1] : 0, ofs);
}
v &= (len == 4) ? 0xffffffffu : ((1u << (len * 8)) - 1); // mask off bytes beyond len (avoids the UB shift 2 << 31)
hash = __funnelshift_l(hash, hash, 5) + v;
}
return uint32_hash16(hash);
}
/**
* @brief Fetch a page fragment and its dictionary entries in row-ascending order
*
* @param[in,out] s dictionary state
* @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after fetching)
* @param[in] frag_start_row row position of current fragment
* @param[in] t thread id
**/
__device__ void FetchDictionaryFragment(dict_state_s *s, uint32_t *dict_data, uint32_t frag_start_row, uint32_t t)
{
if (t < sizeof(PageFragment) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->frag)[t] = reinterpret_cast<const uint32_t *>(s->cur_fragment)[t];
}
__syncthreads();
// Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list)
// It's easiest to do this here since we're only dealing with values all within a 5K-row window
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = dict_data[frag_start_row + i] - frag_start_row;
s->frag_dict[i] = r;
}
__syncthreads();
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = s->frag_dict[i];
dict_data[frag_start_row + r] = 0;
}
__syncthreads();
}
/// Generate dictionary indices in ascending row order
__device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t)
{
uint32_t *dict_index = s->col.dict_index;
uint32_t *dict_data = s->col.dict_data + s->ck.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t num_dict_entries = 0;
for (uint32_t i = 0; i < s->row_cnt; i += 1024) {
uint32_t row = s->ck.start_row + i + t;
uint32_t is_valid = (i + t < s->row_cnt && row < s->col.num_rows) ? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0;
uint32_t dict_idx = (is_valid) ? dict_index[row] : 0;
uint32_t is_unique = (is_valid && dict_idx == row); // Any value that doesn't have bit31 set should have dict_idx=row at this point
uint32_t umask = BALLOT(is_unique);
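// Warp-local prefix over the unique flags; per-warp totals are combined via scratch_red below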
uint32_t pos = num_dict_entries + __popc(umask & ((1 << (t & 0x1f)) - 1));
if (!(t & 0x1f)) {
s->scratch_red[t >> 5] = __popc(umask);
}
num_dict_entries += __syncthreads_count(is_unique);
if (t < 32) {
s->scratch_red[t] = WarpReducePos32(s->scratch_red[t], t);
}
__syncthreads();
if (t >= 32) {
pos += s->scratch_red[(t - 32) >> 5];
}
if (is_valid && is_unique) {
dict_data[pos] = row;
dict_index[row] = pos;
}
__syncthreads();
if (is_valid && !is_unique) {
// NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for final dictionary duplicate elimination and once for re-ordering)
// (If something went wrong building the dictionary, it will likely hang or crash right here)
do {
dict_idx = dict_index[dict_idx & 0x7fffffff];
} while (dict_idx > 0x7fffffff);
dict_index[row] = dict_idx;
}
}
}
// blockDim(1024, 1, 1)
__global__ void __launch_bounds__(1024, 1)
gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch)
{
__shared__ __align__(8) dict_state_s state_g;
dict_state_s * const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t dtype, dtype_len, dtype_len_in;
if (t < sizeof(EncColumnChunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->ck)[t] = reinterpret_cast<const uint32_t *>(&chunks[blockIdx.x])[t];
}
__syncthreads();
if (!s->ck.has_dictionary) {
return;
}
if (t < sizeof(EncColumnDesc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->ck.col_desc)[t];
}
__syncthreads();
if (!t) {
s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits);
s->row_cnt = 0;
s->cur_fragment = s->ck.fragments;
s->total_dict_entries = 0;
s->dictionary_size = 0;
s->ck.num_dict_fragments = 0;
}
dtype = s->col.physical_type;
dtype_len = (dtype == INT64 || dtype == DOUBLE) ? 8 : 4;
if (dtype == INT32) {
uint32_t converted_type = s->col.converted_type;
dtype_len_in = (converted_type == INT_8) ? 1 : (converted_type == INT_16) ? 2 : 4;
}
else {
dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len;
}
__syncthreads();
while (s->row_cnt < s->ck.num_rows) {
uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size;
FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t);
num_dict_entries = s->frag.num_dict_vals;
if (!t) {
s->num_dict_entries = 0;
s->frag_dict_size = 0;
}
for (uint32_t i = 0; i < num_dict_entries; i += 1024) {
bool is_valid = (i + t < num_dict_entries);
uint32_t len = 0;
uint32_t is_dupe = 0;
uint32_t row, hash, next, *next_addr;
uint32_t new_dict_entries;
if (is_valid) {
row = frag_start_row + s->frag_dict[i + t];
len = dtype_len;
if (dtype == BYTE_ARRAY) {
const char *ptr = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].ptr;
uint32_t count = (uint32_t)reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].count;
len += count;
hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(ptr), count);
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
const char *ptr2 = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].ptr;
uint32_t count2 = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].count;
if (count2 == count && nvstr_is_equal(ptr, count, ptr2, count2)) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
}
else {
uint64_t val;
if (dtype_len_in == 8) {
val = reinterpret_cast<const uint64_t *>(s->col.column_data_base)[row];
hash = uint64_hash16(val);
}
else {
val = (dtype_len_in == 4) ? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[row] :
(dtype_len_in == 2) ? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[row] :
reinterpret_cast<const uint8_t *>(s->col.column_data_base)[row];
hash = uint32_hash16(val);
}
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
uint64_t val2 = (dtype_len_in == 8) ? reinterpret_cast<const uint64_t *>(s->col.column_data_base)[next - 1] :
(dtype_len_in == 4) ? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[next - 1] :
(dtype_len_in == 2) ? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[next - 1] :
reinterpret_cast<const uint8_t *>(s->col.column_data_base)[next - 1];
if (val2 == val) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
}
}
// Count the non-duplicate entries
frag_dict_size = WarpReduceSum32((is_valid && !is_dupe) ? len : 0);
if (!(t & 0x1f)) {
s->scratch_red[t >> 5] = frag_dict_size;
}
new_dict_entries = __syncthreads_count(is_valid && !is_dupe);
if (t < 32) {
frag_dict_size = WarpReduceSum32(s->scratch_red[t]);
if (t == 0) {
s->frag_dict_size += frag_dict_size;
s->num_dict_entries += new_dict_entries;
}
}
if (is_valid) {
if (!is_dupe) {
s->col.dict_index[row] = row;
}
else {
s->col.dict_index[row] = (next - 1) | (1u << 31);
}
}
__syncthreads();
// At this point the order of dictionary is non-deterministic, so reorder the duplicate rows such that the lowest
// row number is the non-duplicate value
if (is_valid && is_dupe && next - 1 > row) {
atomicMin(&s->col.dict_index[next - 1], row);
}
__syncthreads();
if (is_valid && is_dupe && next - 1 > row) {
if (s->col.dict_index[next - 1] == row) {
s->col.dict_index[next - 1] = row | (1u << 31);
s->col.dict_index[row] = row;
}
}
__syncthreads();
}
__syncthreads();
num_dict_entries = s->num_dict_entries;
frag_dict_size = s->frag_dict_size;
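// Stop adding fragments once the dictionary would exceed 64K entries or 512KB of data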
if (s->total_dict_entries + num_dict_entries > 65536 || s->dictionary_size + frag_dict_size > 512*1024) {
break;
}
__syncthreads();
if (!t) {
if (num_dict_entries != s->frag.num_dict_vals) {
s->cur_fragment->num_dict_vals = num_dict_entries;
}
if (frag_dict_size != s->frag.dict_data_size) {
s->frag.dict_data_size = frag_dict_size;
}
s->total_dict_entries += num_dict_entries;
s->dictionary_size += frag_dict_size;
s->row_cnt += s->frag.num_rows;
s->cur_fragment++;
s->ck.num_dict_fragments++;
}
__syncthreads();
}
__syncthreads();
GenerateDictionaryIndices(s, t);
if (!t) {
chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments;
chunks[blockIdx.x].dictionary_size = s->dictionary_size;
chunks[blockIdx.x].total_dict_entries = s->total_dict_entries;
}
}
/**
* @brief Launches kernel for building chunk dictionaries
*
* @param[in] chunks Column chunks
* @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary)
* @param[in] num_chunks Number of column chunks
* @param[in] stream CUDA stream to use, default 0
*
* @return hipSuccess if successful, a CUDA error code otherwise
**/
hipError_t BuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch, size_t scratch_size, uint32_t num_chunks, hipStream_t stream)
{
if (num_chunks > 0 && scratch_size > 0) { // zero scratch size implies no dictionaries
hipMemsetAsync(dev_scratch, 0, scratch_size, stream);
hipLaunchKernelGGL((gpuBuildChunkDictionaries), dim3(num_chunks), dim3(1024), 0, stream, chunks, dev_scratch);
}
return hipSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| d47ee9c1489c8ca7fa727bec852c48f0641b539a.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "parquet_gpu.h"
#include <io/utilities/block_utils.cuh>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
struct dict_state_s
{
uint32_t row_cnt;
PageFragment *cur_fragment;
uint32_t *hashmap;
uint32_t total_dict_entries; //!< Total number of entries in dictionary
uint32_t dictionary_size; //!< Total dictionary size in bytes
uint32_t num_dict_entries; //!< Dictionary entries in current fragment to add
uint32_t frag_dict_size;
EncColumnChunk ck;
EncColumnDesc col;
PageFragment frag;
volatile uint32_t scratch_red[32];
uint16_t frag_dict[MAX_PAGE_FRAGMENT_SIZE];
};
/**
* @brief Computes a 16-bit dictionary hash
**/
inline __device__ uint32_t uint32_hash16(uint32_t v)
{
return (v + (v >> 16)) & 0xffff;
}
inline __device__ uint32_t uint64_hash16(uint64_t v)
{
return uint32_hash16((uint32_t)(v + (v >> 32)));
}
inline __device__ uint32_t nvstr_hash16(const uint8_t *p, uint32_t len)
{
uint32_t hash = len;
if (len > 0) {
uint32_t align_p = 3 & reinterpret_cast<uintptr_t>(p);
const uint32_t *p32 = reinterpret_cast<const uint32_t *>(p - align_p);
uint32_t ofs = align_p * 8;
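// Consume 32 bits per iteration; __funnelshift_r stitches adjacent words to handle unaligned input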
uint32_t v;
while (len > 4) {
v = *p32++;
if (ofs) {
v = __funnelshift_r(v, *p32, ofs);
}
hash = __funnelshift_l(hash, hash, 5) + v;
len -= 4;
}
v = *p32;
if (ofs) {
v = __funnelshift_r(v, (ofs + len > 4) ? p32[1] : 0, ofs);
}
v &= (len == 4) ? 0xffffffffu : ((1u << (len * 8)) - 1); // mask off bytes beyond len (avoids the UB shift 2 << 31)
hash = __funnelshift_l(hash, hash, 5) + v;
}
return uint32_hash16(hash);
}
/**
* @brief Fetch a page fragment and its dictionary entries in row-ascending order
*
* @param[in,out] s dictionary state
* @param[in,out] dict_data fragment dictionary data for the current column (zeroed out after fetching)
* @param[in] frag_start_row row position of current fragment
* @param[in] t thread id
**/
__device__ void FetchDictionaryFragment(dict_state_s *s, uint32_t *dict_data, uint32_t frag_start_row, uint32_t t)
{
if (t < sizeof(PageFragment) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->frag)[t] = reinterpret_cast<const uint32_t *>(s->cur_fragment)[t];
}
__syncthreads();
// Store the row values in shared mem and set the corresponding dict_data to zero (end-of-list)
// It's easiest to do this here since we're only dealing with values all within a 5K-row window
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = dict_data[frag_start_row + i] - frag_start_row;
s->frag_dict[i] = r;
}
__syncthreads();
for (uint32_t i = t; i < s->frag.num_dict_vals; i += 1024) {
uint32_t r = s->frag_dict[i];
dict_data[frag_start_row + r] = 0;
}
__syncthreads();
}
/// Generate dictionary indices in ascending row order
__device__ void GenerateDictionaryIndices(dict_state_s *s, uint32_t t)
{
uint32_t *dict_index = s->col.dict_index;
uint32_t *dict_data = s->col.dict_data + s->ck.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t num_dict_entries = 0;
for (uint32_t i = 0; i < s->row_cnt; i += 1024) {
uint32_t row = s->ck.start_row + i + t;
uint32_t is_valid = (i + t < s->row_cnt && row < s->col.num_rows) ? (valid_map) ? (valid_map[row >> 5] >> (row & 0x1f)) & 1 : 1 : 0;
uint32_t dict_idx = (is_valid) ? dict_index[row] : 0;
uint32_t is_unique = (is_valid && dict_idx == row); // Any value that doesn't have bit31 set should have dict_idx=row at this point
uint32_t umask = BALLOT(is_unique);
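// Warp-local prefix over the unique flags; per-warp totals are combined via scratch_red below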
uint32_t pos = num_dict_entries + __popc(umask & ((1 << (t & 0x1f)) - 1));
if (!(t & 0x1f)) {
s->scratch_red[t >> 5] = __popc(umask);
}
num_dict_entries += __syncthreads_count(is_unique);
if (t < 32) {
s->scratch_red[t] = WarpReducePos32(s->scratch_red[t], t);
}
__syncthreads();
if (t >= 32) {
pos += s->scratch_red[(t - 32) >> 5];
}
if (is_valid && is_unique) {
dict_data[pos] = row;
dict_index[row] = pos;
}
__syncthreads();
if (is_valid && !is_unique) {
// NOTE: Should have at most 3 iterations (once for early duplicate elimination, once for final dictionary duplicate elimination and once for re-ordering)
// (If something went wrong building the dictionary, it will likely hang or crash right here)
do {
dict_idx = dict_index[dict_idx & 0x7fffffff];
} while (dict_idx > 0x7fffffff);
dict_index[row] = dict_idx;
}
}
}
// blockDim(1024, 1, 1)
__global__ void __launch_bounds__(1024, 1)
gpuBuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch)
{
__shared__ __align__(8) dict_state_s state_g;
dict_state_s * const s = &state_g;
uint32_t t = threadIdx.x;
uint32_t dtype, dtype_len, dtype_len_in;
if (t < sizeof(EncColumnChunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->ck)[t] = reinterpret_cast<const uint32_t *>(&chunks[blockIdx.x])[t];
}
__syncthreads();
if (!s->ck.has_dictionary) {
return;
}
if (t < sizeof(EncColumnDesc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->ck.col_desc)[t];
}
__syncthreads();
if (!t) {
s->hashmap = dev_scratch + s->ck.dictionary_id * (size_t)(1 << kDictHashBits);
s->row_cnt = 0;
s->cur_fragment = s->ck.fragments;
s->total_dict_entries = 0;
s->dictionary_size = 0;
s->ck.num_dict_fragments = 0;
}
dtype = s->col.physical_type;
dtype_len = (dtype == INT64 || dtype == DOUBLE) ? 8 : 4;
if (dtype == INT32) {
uint32_t converted_type = s->col.converted_type;
dtype_len_in = (converted_type == INT_8) ? 1 : (converted_type == INT_16) ? 2 : 4;
}
else {
dtype_len_in = (dtype == BYTE_ARRAY) ? sizeof(nvstrdesc_s) : dtype_len;
}
__syncthreads();
while (s->row_cnt < s->ck.num_rows) {
uint32_t frag_start_row = s->ck.start_row + s->row_cnt, num_dict_entries, frag_dict_size;
FetchDictionaryFragment(s, s->col.dict_data, frag_start_row, t);
num_dict_entries = s->frag.num_dict_vals;
if (!t) {
s->num_dict_entries = 0;
s->frag_dict_size = 0;
}
for (uint32_t i = 0; i < num_dict_entries; i += 1024) {
bool is_valid = (i + t < num_dict_entries);
uint32_t len = 0;
uint32_t is_dupe = 0;
uint32_t row, hash, next, *next_addr;
uint32_t new_dict_entries;
if (is_valid) {
row = frag_start_row + s->frag_dict[i + t];
len = dtype_len;
if (dtype == BYTE_ARRAY) {
const char *ptr = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].ptr;
uint32_t count = (uint32_t)reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[row].count;
len += count;
hash = nvstr_hash16(reinterpret_cast<const uint8_t *>(ptr), count);
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
const char *ptr2 = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].ptr;
uint32_t count2 = reinterpret_cast<const nvstrdesc_s *>(s->col.column_data_base)[next - 1].count;
if (count2 == count && nvstr_is_equal(ptr, count, ptr2, count2)) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
}
else {
uint64_t val;
if (dtype_len_in == 8) {
val = reinterpret_cast<const uint64_t *>(s->col.column_data_base)[row];
hash = uint64_hash16(val);
}
else {
val = (dtype_len_in == 4) ? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[row] :
(dtype_len_in == 2) ? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[row] :
reinterpret_cast<const uint8_t *>(s->col.column_data_base)[row];
hash = uint32_hash16(val);
}
// Walk the list of rows with the same hash
next_addr = &s->hashmap[hash];
while ((next = atomicCAS(next_addr, 0, row + 1)) != 0) {
uint64_t val2 = (dtype_len_in == 8) ? reinterpret_cast<const uint64_t *>(s->col.column_data_base)[next - 1] :
(dtype_len_in == 4) ? reinterpret_cast<const uint32_t *>(s->col.column_data_base)[next - 1] :
(dtype_len_in == 2) ? reinterpret_cast<const uint16_t *>(s->col.column_data_base)[next - 1] :
reinterpret_cast<const uint8_t *>(s->col.column_data_base)[next - 1];
if (val2 == val) {
is_dupe = 1;
break;
}
next_addr = &s->col.dict_data[next - 1];
}
}
}
// Count the non-duplicate entries
frag_dict_size = WarpReduceSum32((is_valid && !is_dupe) ? len : 0);
if (!(t & 0x1f)) {
s->scratch_red[t >> 5] = frag_dict_size;
}
new_dict_entries = __syncthreads_count(is_valid && !is_dupe);
if (t < 32) {
frag_dict_size = WarpReduceSum32(s->scratch_red[t]);
if (t == 0) {
s->frag_dict_size += frag_dict_size;
s->num_dict_entries += new_dict_entries;
}
}
if (is_valid) {
if (!is_dupe) {
s->col.dict_index[row] = row;
}
else {
s->col.dict_index[row] = (next - 1) | (1u << 31);
}
}
__syncthreads();
// At this point the order of dictionary is non-deterministic, so reorder the duplicate rows such that the lowest
// row number is the non-duplicate value
if (is_valid && is_dupe && next - 1 > row) {
atomicMin(&s->col.dict_index[next - 1], row);
}
__syncthreads();
if (is_valid && is_dupe && next - 1 > row) {
if (s->col.dict_index[next - 1] == row) {
s->col.dict_index[next - 1] = row | (1u << 31);
s->col.dict_index[row] = row;
}
}
__syncthreads();
}
__syncthreads();
num_dict_entries = s->num_dict_entries;
frag_dict_size = s->frag_dict_size;
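// Stop adding fragments once the dictionary would exceed 64K entries or 512KB of data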
if (s->total_dict_entries + num_dict_entries > 65536 || s->dictionary_size + frag_dict_size > 512*1024) {
break;
}
__syncthreads();
if (!t) {
if (num_dict_entries != s->frag.num_dict_vals) {
s->cur_fragment->num_dict_vals = num_dict_entries;
}
if (frag_dict_size != s->frag.dict_data_size) {
s->frag.dict_data_size = frag_dict_size;
}
s->total_dict_entries += num_dict_entries;
s->dictionary_size += frag_dict_size;
s->row_cnt += s->frag.num_rows;
s->cur_fragment++;
s->ck.num_dict_fragments++;
}
__syncthreads();
}
__syncthreads();
GenerateDictionaryIndices(s, t);
if (!t) {
chunks[blockIdx.x].num_dict_fragments = s->ck.num_dict_fragments;
chunks[blockIdx.x].dictionary_size = s->dictionary_size;
chunks[blockIdx.x].total_dict_entries = s->total_dict_entries;
}
}
/**
* @brief Launches kernel for building chunk dictionaries
*
* @param[in] chunks Column chunks
* @param[in] dev_scratch Device scratch data (kDictScratchSize per dictionary)
* @param[in] num_chunks Number of column chunks
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t BuildChunkDictionaries(EncColumnChunk *chunks, uint32_t *dev_scratch, size_t scratch_size, uint32_t num_chunks, cudaStream_t stream)
{
if (num_chunks > 0 && scratch_size > 0) { // zero scratch size implies no dictionaries
cudaMemsetAsync(dev_scratch, 0, scratch_size, stream);
gpuBuildChunkDictionaries <<< num_chunks, 1024, 0, stream >>>(chunks, dev_scratch);
}
return cudaSuccess;
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
2bdebe1153e9eabd7e806e7f1fe197cd309230c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/kdtree_flann.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
void SelectByIndexImpl(const geometry::PointCloud &src,
geometry::PointCloud &dst,
const utility::device_vector<size_t> &indices) {
const bool has_normals = src.HasNormals();
const bool has_colors = src.HasColors();
if (has_normals) dst.normals_.resize(indices.size());
if (has_colors) dst.colors_.resize(indices.size());
dst.points_.resize(indices.size());
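// Each attribute is gathered on its own stream so the three copies can overlap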
thrust::gather(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
indices.begin(), indices.end(), src.points_.begin(),
dst.points_.begin());
if (has_normals) {
thrust::gather(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
indices.begin(), indices.end(), src.normals_.begin(),
dst.normals_.begin());
}
if (has_colors) {
thrust::gather(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
indices.begin(), indices.end(), src.colors_.begin(),
dst.colors_.begin());
}
cudaSafeCall(hipDeviceSynchronize());
}
struct compute_key_functor {
compute_key_functor(const Eigen::Vector3f &voxel_min_bound,
float voxel_size)
: voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){};
const Eigen::Vector3f voxel_min_bound_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) {
auto ref_coord = (pt - voxel_min_bound_) / voxel_size_;
return Eigen::Vector3i(int(floor(ref_coord(0))),
int(floor(ref_coord(1))),
int(floor(ref_coord(2))));
}
};
template <typename OutputIterator, class... Args>
__host__ int CalcAverageByKey(utility::device_vector<Eigen::Vector3i> &keys,
OutputIterator buf_begins,
OutputIterator output_begins) {
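// Sort tuples by voxel key, count points per key (reduce_by_key with a constant 1),
// segmented-sum the tuples, then divide by the counts to average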
const size_t n = keys.size();
thrust::sort_by_key(keys.begin(), keys.end(), buf_begins);
utility::device_vector<int> counts(n);
auto end1 = thrust::reduce_by_key(
keys.begin(), keys.end(), thrust::make_constant_iterator(1),
thrust::make_discard_iterator(), counts.begin());
int n_out = thrust::distance(counts.begin(), end1.second);
counts.resize(n_out);
thrust::equal_to<Eigen::Vector3i> binary_pred;
add_tuple_functor<Args...> add_func;
thrust::reduce_by_key(keys.begin(), keys.end(), buf_begins,
thrust::make_discard_iterator(),
output_begins, binary_pred, add_func);
devide_tuple_functor<Args...> dv_func;
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
return n_out;
}
struct has_radius_points_functor {
has_radius_points_functor(const int *indices, int n_points, int knn)
: indices_(indices), n_points_(n_points), knn_(knn){};
const int *indices_;
const int n_points_;
const int knn_;
__device__ bool operator()(int idx) const {
int count = 0;
for (int i = 0; i < knn_; ++i) {
if (indices_[idx * knn_ + i] >= 0) count++;
}
return (count > n_points_);
}
};
struct average_distance_functor {
average_distance_functor(const float *distance, int knn)
: distance_(distance), knn_(knn){};
const float *distance_;
const int knn_;
__device__ float operator()(int idx) const {
int count = 0;
float avg = 0;
for (int i = 0; i < knn_; ++i) {
const float d = distance_[idx * knn_ + i];
if (isinf(d) || d < 0.0) continue;
avg += d;
count++;
}
return (count == 0) ? -1.0 : avg / (float)count;
}
};
struct check_distance_threshold_functor {
check_distance_threshold_functor(const float *distances,
float distance_threshold)
: distances_(distances), distance_threshold_(distance_threshold){};
const float *distances_;
const float distance_threshold_;
__device__ bool operator()(int idx) const {
return (distances_[idx] > 0 && distances_[idx] < distance_threshold_);
}
};
} // namespace
std::shared_ptr<PointCloud> PointCloud::SelectByIndex(
const utility::device_vector<size_t> &indices, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (invert) {
size_t n_out = points_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(sorted_indices.begin(), sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(points_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
SelectByIndexImpl(*this, *output, inv_indices);
} else {
SelectByIndexImpl(*this, *output, indices);
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::VoxelDownSample(
float voxel_size) const {
auto output = std::make_shared<PointCloud>();
if (voxel_size <= 0.0) {
utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n");
return output;
}
const Eigen::Vector3f voxel_size3 =
Eigen::Vector3f(voxel_size, voxel_size, voxel_size);
const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5;
const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5;
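// Pad the bounds by half a voxel so boundary points fall strictly inside the grid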
if (voxel_size * std::numeric_limits<int>::max() <
(voxel_max_bound - voxel_min_bound).maxCoeff()) {
utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n");
return output;
}
const int n = points_.size();
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
compute_key_functor ck_func(voxel_min_bound, voxel_size);
utility::device_vector<Eigen::Vector3i> keys(n);
thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func);
utility::device_vector<Eigen::Vector3f> sorted_points = points_;
output->points_.resize(n);
if (!has_normals && !has_colors) {
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f>(
keys, make_tuple_begin(sorted_points),
make_tuple_begin(output->points_));
output->points_.resize(n_out);
} else if (has_normals && !has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
output->normals_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_begin(sorted_points, sorted_normals),
make_tuple_begin(output->points_, output->normals_));
resize_all(n_out, output->points_, output->normals_);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
} else if (!has_normals && has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_begin(sorted_points, sorted_colors),
make_tuple_begin(output->points_, output->colors_));
resize_all(n_out, output->points_, output->colors_);
} else {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->normals_.resize(n);
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f,
Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_begin(sorted_points, sorted_normals,
sorted_colors),
make_tuple_begin(output->points_, output->normals_,
output->colors_));
resize_all(n_out, output->points_, output->normals_, output->colors_);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
}
utility::LogDebug(
"Pointcloud down sampled from {:d} points to {:d} points.\n",
(int)points_.size(), (int)output->points_.size());
return output;
}
std::shared_ptr<PointCloud> PointCloud::UniformDownSample(
size_t every_k_points) const {
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
auto output = std::make_shared<PointCloud>();
if (every_k_points == 0) {
utility::LogError("[UniformDownSample] Illegal sample rate.");
return output;
}
const int n_out = points_.size() / every_k_points;
output->points_.resize(n_out);
if (has_normals) output->normals_.resize(n_out);
if (has_colors) output->colors_.resize(n_out);
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_points(points_.begin(), points_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
range_points.begin(), range_points.end(),
output->points_.begin());
if (has_normals) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_normals(normals_.begin(), normals_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
range_normals.begin(), range_normals.end(),
output->normals_.begin());
}
if (has_colors) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_colors(colors_.begin(), colors_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
range_colors.begin(), range_colors.end(),
output->colors_.begin());
}
cudaSafeCall(hipDeviceSynchronize());
return output;
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const {
if (nb_points < 1 || search_radius <= 0) {
utility::LogError(
"[RemoveRadiusOutliers] Illegal input parameters,"
"number of points and radius must be positive");
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchRadius(points_, search_radius, tmp_indices, dist);
const size_t n_pt = points_.size();
utility::device_vector<size_t> indices(n_pt);
has_radius_points_functor func(thrust::raw_pointer_cast(tmp_indices.data()),
nb_points, NUM_MAX_NN);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt),
indices.begin(), func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectByIndex(indices), indices);
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors,
float std_ratio) const {
if (nb_neighbors < 1 || std_ratio <= 0) {
utility::LogError(
"[RemoveStatisticalOutliers] Illegal input parameters, number "
"of neighbors and standard deviation ratio must be positive");
}
if (points_.empty()) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
const int n_pt = points_.size();
utility::device_vector<float> avg_distances(n_pt);
utility::device_vector<size_t> indices(n_pt);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist);
average_distance_functor avg_func(thrust::raw_pointer_cast(dist.data()),
nb_neighbors);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
avg_distances.begin(), avg_func);
const size_t valid_distances =
thrust::count_if(avg_distances.begin(), avg_distances.end(),
[] __device__(float x) { return (x >= 0.0); });
if (valid_distances == 0) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
float cloud_mean =
thrust::reduce(avg_distances.begin(), avg_distances.end(), 0.0,
[] __device__(float const &x, float const &y) {
return (y > 0) ? x + y : x;
});
cloud_mean /= valid_distances;
const float sq_sum = thrust::transform_reduce(
avg_distances.begin(), avg_distances.end(),
[cloud_mean] __device__(const float x) {
return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0;
},
0.0, thrust::plus<float>());
// Bessel's correction
const float std_dev = std::sqrt(sq_sum / (valid_distances - 1));
const float distance_threshold = cloud_mean + std_ratio * std_dev;
check_distance_threshold_functor th_func(
thrust::raw_pointer_cast(avg_distances.data()), distance_threshold);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
indices.begin(), th_func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectByIndex(indices), indices);
}
| 2bdebe1153e9eabd7e806e7f1fe197cd309230c8.cu | #include <thrust/gather.h>
#include <thrust/iterator/discard_iterator.h>
#include "cupoch/geometry/kdtree_flann.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/utility/console.h"
#include "cupoch/utility/helper.h"
#include "cupoch/utility/platform.h"
#include "cupoch/utility/range.h"
using namespace cupoch;
using namespace cupoch::geometry;
namespace {
void SelectByIndexImpl(const geometry::PointCloud &src,
geometry::PointCloud &dst,
const utility::device_vector<size_t> &indices) {
const bool has_normals = src.HasNormals();
const bool has_colors = src.HasColors();
if (has_normals) dst.normals_.resize(indices.size());
if (has_colors) dst.colors_.resize(indices.size());
dst.points_.resize(indices.size());
thrust::gather(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
indices.begin(), indices.end(), src.points_.begin(),
dst.points_.begin());
if (has_normals) {
thrust::gather(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
indices.begin(), indices.end(), src.normals_.begin(),
dst.normals_.begin());
}
if (has_colors) {
thrust::gather(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
indices.begin(), indices.end(), src.colors_.begin(),
dst.colors_.begin());
}
cudaSafeCall(cudaDeviceSynchronize());
}
struct compute_key_functor {
compute_key_functor(const Eigen::Vector3f &voxel_min_bound,
float voxel_size)
: voxel_min_bound_(voxel_min_bound), voxel_size_(voxel_size){};
const Eigen::Vector3f voxel_min_bound_;
const float voxel_size_;
__device__ Eigen::Vector3i operator()(const Eigen::Vector3f &pt) {
auto ref_coord = (pt - voxel_min_bound_) / voxel_size_;
return Eigen::Vector3i(int(floor(ref_coord(0))),
int(floor(ref_coord(1))),
int(floor(ref_coord(2))));
}
};
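// Averages tuple elements per voxel key: sort by key, count the points per
// key with reduce_by_key over a constant iterator, sum the attributes with a
// second reduce_by_key, then divide each sum by its count.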
template <typename OutputIterator, class... Args>
__host__ int CalcAverageByKey(utility::device_vector<Eigen::Vector3i> &keys,
OutputIterator buf_begins,
OutputIterator output_begins) {
const size_t n = keys.size();
thrust::sort_by_key(keys.begin(), keys.end(), buf_begins);
utility::device_vector<int> counts(n);
auto end1 = thrust::reduce_by_key(
keys.begin(), keys.end(), thrust::make_constant_iterator(1),
thrust::make_discard_iterator(), counts.begin());
int n_out = thrust::distance(counts.begin(), end1.second);
counts.resize(n_out);
thrust::equal_to<Eigen::Vector3i> binary_pred;
add_tuple_functor<Args...> add_func;
auto end2 = thrust::reduce_by_key(keys.begin(), keys.end(), buf_begins,
thrust::make_discard_iterator(),
output_begins, binary_pred, add_func);
devide_tuple_functor<Args...> dv_func;
thrust::transform(output_begins, output_begins + n_out, counts.begin(),
output_begins, dv_func);
return n_out;
}
struct has_radius_points_functor {
has_radius_points_functor(const int *indices, int n_points, int knn)
: indices_(indices), n_points_(n_points), knn_(knn){};
const int *indices_;
const int n_points_;
const int knn_;
__device__ bool operator()(int idx) const {
int count = 0;
for (int i = 0; i < knn_; ++i) {
if (indices_[idx * knn_ + i] >= 0) count++;
}
return (count > n_points_);
}
};
struct average_distance_functor {
average_distance_functor(const float *distance, int knn)
: distance_(distance), knn_(knn){};
const float *distance_;
const int knn_;
__device__ float operator()(int idx) const {
int count = 0;
float avg = 0;
for (int i = 0; i < knn_; ++i) {
const float d = distance_[idx * knn_ + i];
if (isinf(d) || d < 0.0) continue;
avg += d;
count++;
}
return (count == 0) ? -1.0 : avg / (float)count;
}
};
struct check_distance_threshold_functor {
check_distance_threshold_functor(const float *distances,
float distance_threshold)
: distances_(distances), distance_threshold_(distance_threshold){};
const float *distances_;
const float distance_threshold_;
__device__ bool operator()(int idx) const {
return (distances_[idx] > 0 && distances_[idx] < distance_threshold_);
}
};
} // namespace
std::shared_ptr<PointCloud> PointCloud::SelectByIndex(
const utility::device_vector<size_t> &indices, bool invert) const {
auto output = std::make_shared<PointCloud>();
if (invert) {
size_t n_out = points_.size() - indices.size();
utility::device_vector<size_t> sorted_indices = indices;
thrust::sort(sorted_indices.begin(), sorted_indices.end());
utility::device_vector<size_t> inv_indices(n_out);
thrust::set_difference(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(points_.size()),
sorted_indices.begin(), sorted_indices.end(),
inv_indices.begin());
SelectByIndexImpl(*this, *output, inv_indices);
} else {
SelectByIndexImpl(*this, *output, indices);
}
return output;
}
std::shared_ptr<PointCloud> PointCloud::VoxelDownSample(
float voxel_size) const {
auto output = std::make_shared<PointCloud>();
if (voxel_size <= 0.0) {
utility::LogWarning("[VoxelDownSample] voxel_size <= 0.\n");
return output;
}
const Eigen::Vector3f voxel_size3 =
Eigen::Vector3f(voxel_size, voxel_size, voxel_size);
const Eigen::Vector3f voxel_min_bound = GetMinBound() - voxel_size3 * 0.5;
const Eigen::Vector3f voxel_max_bound = GetMaxBound() + voxel_size3 * 0.5;
if (voxel_size * std::numeric_limits<int>::max() <
(voxel_max_bound - voxel_min_bound).maxCoeff()) {
utility::LogWarning("[VoxelDownSample] voxel_size is too small.\n");
return output;
}
const int n = points_.size();
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
compute_key_functor ck_func(voxel_min_bound, voxel_size);
utility::device_vector<Eigen::Vector3i> keys(n);
thrust::transform(points_.begin(), points_.end(), keys.begin(), ck_func);
utility::device_vector<Eigen::Vector3f> sorted_points = points_;
output->points_.resize(n);
if (!has_normals && !has_colors) {
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f>(
keys, make_tuple_begin(sorted_points),
make_tuple_begin(output->points_));
output->points_.resize(n_out);
} else if (has_normals && !has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
output->normals_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_begin(sorted_points, sorted_normals),
make_tuple_begin(output->points_, output->normals_));
resize_all(n_out, output->points_, output->normals_);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
} else if (!has_normals && has_colors) {
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out =
CalcAverageByKey<ZipIterator, Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_begin(sorted_points, sorted_colors),
make_tuple_begin(output->points_, output->colors_));
resize_all(n_out, output->points_, output->colors_);
} else {
utility::device_vector<Eigen::Vector3f> sorted_normals = normals_;
utility::device_vector<Eigen::Vector3f> sorted_colors = colors_;
output->normals_.resize(n);
output->colors_.resize(n);
typedef thrust::tuple<utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator,
utility::device_vector<Eigen::Vector3f>::iterator>
IteratorTuple;
typedef thrust::zip_iterator<IteratorTuple> ZipIterator;
auto n_out = CalcAverageByKey<ZipIterator, Eigen::Vector3f,
Eigen::Vector3f, Eigen::Vector3f>(
keys,
make_tuple_begin(sorted_points, sorted_normals,
sorted_colors),
make_tuple_begin(output->points_, output->normals_,
output->colors_));
resize_all(n_out, output->points_, output->normals_, output->colors_);
thrust::for_each(
output->normals_.begin(), output->normals_.end(),
[] __device__(Eigen::Vector3f & nl) { nl.normalize(); });
}
utility::LogDebug(
"Pointcloud down sampled from {:d} points to {:d} points.\n",
(int)points_.size(), (int)output->points_.size());
return output;
}
std::shared_ptr<PointCloud> PointCloud::UniformDownSample(
size_t every_k_points) const {
const bool has_normals = HasNormals();
const bool has_colors = HasColors();
auto output = std::make_shared<PointCloud>();
if (every_k_points == 0) {
utility::LogError("[UniformDownSample] Illegal sample rate.");
return output;
}
const int n_out = points_.size() / every_k_points;
output->points_.resize(n_out);
if (has_normals) output->normals_.resize(n_out);
if (has_colors) output->colors_.resize(n_out);
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_points(points_.begin(), points_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(0))
->on(utility::GetStream(0)),
range_points.begin(), range_points.end(),
output->points_.begin());
if (has_normals) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_normals(normals_.begin(), normals_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(1))
->on(utility::GetStream(1)),
range_normals.begin(), range_normals.end(),
output->normals_.begin());
}
if (has_colors) {
thrust::strided_range<
utility::device_vector<Eigen::Vector3f>::const_iterator>
range_colors(colors_.begin(), colors_.end(), every_k_points);
thrust::copy(utility::exec_policy(utility::GetStream(2))
->on(utility::GetStream(2)),
range_colors.begin(), range_colors.end(),
output->colors_.begin());
}
cudaSafeCall(cudaDeviceSynchronize());
return output;
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveRadiusOutliers(size_t nb_points, float search_radius) const {
if (nb_points < 1 || search_radius <= 0) {
utility::LogError(
"[RemoveRadiusOutliers] Illegal input parameters,"
"number of points and radius must be positive");
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchRadius(points_, search_radius, tmp_indices, dist);
const size_t n_pt = points_.size();
utility::device_vector<size_t> indices(n_pt);
has_radius_points_functor func(thrust::raw_pointer_cast(tmp_indices.data()),
nb_points, NUM_MAX_NN);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator(n_pt),
indices.begin(), func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectByIndex(indices), indices);
}
std::tuple<std::shared_ptr<PointCloud>, utility::device_vector<size_t>>
PointCloud::RemoveStatisticalOutliers(size_t nb_neighbors,
float std_ratio) const {
if (nb_neighbors < 1 || std_ratio <= 0) {
utility::LogError(
"[RemoveStatisticalOutliers] Illegal input parameters, number "
"of neighbors and standard deviation ratio must be positive");
}
if (points_.empty()) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
KDTreeFlann kdtree;
kdtree.SetGeometry(*this);
const int n_pt = points_.size();
utility::device_vector<float> avg_distances(n_pt);
utility::device_vector<size_t> indices(n_pt);
utility::device_vector<int> tmp_indices;
utility::device_vector<float> dist;
kdtree.SearchKNN(points_, int(nb_neighbors), tmp_indices, dist);
average_distance_functor avg_func(thrust::raw_pointer_cast(dist.data()),
nb_neighbors);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
avg_distances.begin(), avg_func);
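    // avg_distances entries are -1 for points with no valid neighbor; only
    // non-negative entries contribute to the mean / std-dev statistics below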
const size_t valid_distances =
thrust::count_if(avg_distances.begin(), avg_distances.end(),
[] __device__(float x) { return (x >= 0.0); });
if (valid_distances == 0) {
return std::make_tuple(std::make_shared<PointCloud>(),
utility::device_vector<size_t>());
}
float cloud_mean =
thrust::reduce(avg_distances.begin(), avg_distances.end(), 0.0,
[] __device__(float const &x, float const &y) {
return (y > 0) ? x + y : x;
});
cloud_mean /= valid_distances;
const float sq_sum = thrust::transform_reduce(
avg_distances.begin(), avg_distances.end(),
[cloud_mean] __device__(const float x) {
return (x > 0) ? (x - cloud_mean) * (x - cloud_mean) : 0;
},
0.0, thrust::plus<float>());
// Bessel's correction
const float std_dev = std::sqrt(sq_sum / (valid_distances - 1));
const float distance_threshold = cloud_mean + std_ratio * std_dev;
check_distance_threshold_functor th_func(
thrust::raw_pointer_cast(avg_distances.data()), distance_threshold);
auto end = thrust::copy_if(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator((size_t)n_pt),
indices.begin(), th_func);
indices.resize(thrust::distance(indices.begin(), end));
return std::make_tuple(SelectByIndex(indices), indices);
}
|
851d87f3d02231f1ade6082f84c5a86b2dfebbf0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
int main() {
int nDevices;
hipGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
    printf("  Minor Compute Capability: %d\n",
           prop.minor);
    printf("  Major Compute Capability: %d\n",
           prop.major);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
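    // peak bandwidth (GB/s) = 2 (double data rate) * clock (kHz) * bus width (bytes) / 1e6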
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
} | 851d87f3d02231f1ade6082f84c5a86b2dfebbf0.cu | #include <stdio.h>
int main() {
int nDevices;
cudaGetDeviceCount(&nDevices);
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
    printf("  Minor Compute Capability: %d\n",
           prop.minor);
    printf("  Major Compute Capability: %d\n",
           prop.major);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
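    // peak bandwidth (GB/s) = 2 (double data rate) * clock (kHz) * bus width (bytes) / 1e6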
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
} |
0e1e96d0f95aedeea24a845efcdfc23c609de9eb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelUpdatePBest.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *positions = NULL;
hipMalloc(&positions, XSIZE*YSIZE);
float *pBests = NULL;
hipMalloc(&pBests, XSIZE*YSIZE);
float *gBest = NULL;
hipMalloc(&gBest, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kernelUpdatePBest), dim3(gridBlock),dim3(threadBlock), 0, 0, positions,pBests,gBest);
hipDeviceSynchronize();
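    // 10 untimed warm-up launches before the 1000-launch timed loop below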
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernelUpdatePBest), dim3(gridBlock),dim3(threadBlock), 0, 0, positions,pBests,gBest);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernelUpdatePBest), dim3(gridBlock),dim3(threadBlock), 0, 0, positions,pBests,gBest);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0e1e96d0f95aedeea24a845efcdfc23c609de9eb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelUpdatePBest.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *positions = NULL;
cudaMalloc(&positions, XSIZE*YSIZE);
float *pBests = NULL;
cudaMalloc(&pBests, XSIZE*YSIZE);
float *gBest = NULL;
cudaMalloc(&gBest, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelUpdatePBest<<<gridBlock,threadBlock>>>(positions,pBests,gBest);
cudaDeviceSynchronize();
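    // 10 untimed warm-up launches before the 1000-launch timed loop below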
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelUpdatePBest<<<gridBlock,threadBlock>>>(positions,pBests,gBest);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelUpdatePBest<<<gridBlock,threadBlock>>>(positions,pBests,gBest);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
aa034492ec136c079d1e80ed37320a1176ea18bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void optimalTransposeKernel(const float *input, float *output, int n)
{
__shared__ float tile[64][65];
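    // the extra padding column keeps the transposed column reads free of
    // shared-memory bank conflicts; each block moves a 64x64 tile, four rows
    // per thread (assumes a 64x16 thread block)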
int x = blockIdx.x * 64 + threadIdx.x;
int y = blockIdx.y * 64 + threadIdx.y;
const int width = gridDim.x * 64;
const int height = gridDim.y * 64;
if (x < width && y < height)
{ tile[threadIdx.y][threadIdx.x] = input[y*width + x];
tile[threadIdx.y+16][threadIdx.x] = input[(y+16)*width +x];
tile[threadIdx.y+32][threadIdx.x] = input[(y+32)*width +x];
tile[threadIdx.y+48][threadIdx.x] = input[(y+48)*width +x];
}
__syncthreads();
x = blockIdx.y * 64 + threadIdx.x; // transpose block offset
y = blockIdx.x * 64 + threadIdx.y;
if (y < width && x < height)
{ output[y*height + x] = tile[threadIdx.x][threadIdx.y];
output[(y+16)*height +x] = tile[threadIdx.x][threadIdx.y+16];
output[(y+32)*height +x] = tile[threadIdx.x][threadIdx.y+32];
output[(y+48)*height +x] = tile[threadIdx.x][threadIdx.y+48];
}
} | aa034492ec136c079d1e80ed37320a1176ea18bc.cu | #include "includes.h"
__global__ void optimalTransposeKernel(const float *input, float *output, int n)
{
__shared__ float tile[64][65];
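    // the extra padding column keeps the transposed column reads free of
    // shared-memory bank conflicts; each block moves a 64x64 tile, four rows
    // per thread (assumes a 64x16 thread block)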
int x = blockIdx.x * 64 + threadIdx.x;
int y = blockIdx.y * 64 + threadIdx.y;
const int width = gridDim.x * 64;
const int height = gridDim.y * 64;
if (x < width && y < height)
{ tile[threadIdx.y][threadIdx.x] = input[y*width + x];
tile[threadIdx.y+16][threadIdx.x] = input[(y+16)*width +x];
tile[threadIdx.y+32][threadIdx.x] = input[(y+32)*width +x];
tile[threadIdx.y+48][threadIdx.x] = input[(y+48)*width +x];
}
__syncthreads();
x = blockIdx.y * 64 + threadIdx.x; // transpose block offset
y = blockIdx.x * 64 + threadIdx.y;
if (y < width && x < height)
{ output[y*height + x] = tile[threadIdx.x][threadIdx.y];
output[(y+16)*height +x] = tile[threadIdx.x][threadIdx.y+16];
output[(y+32)*height +x] = tile[threadIdx.x][threadIdx.y+32];
output[(y+48)*height +x] = tile[threadIdx.x][threadIdx.y+48];
}
} |
065cbdb0589bee416f044df2a8cfebce384de062.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void RANSAC_Fundamental(const Point2Df *src, const Point2Df *dst,int pts_num, const int *rand_list, float inlier_threshold, int iterations, int *inliers, float *fundamental)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // return if this thread exceeds the iteration count
    if(idx >= iterations) return;
    int rand_idx[4]; // random list indices
    Point2Df _src[4]; // source image points
    Point2Df _dst[4]; // matched points
    // each thread computes one F matrix
    float *F = &fundamental[idx*9];
    // fetch the random list indices this thread is responsible for
rand_idx[0] = rand_list[idx*4+0]; rand_idx[1] = rand_list[idx*4+1];
rand_idx[2] = rand_list[idx*4+2]; rand_idx[3] = rand_list[idx*4+3];
    // reject duplicate point pairs
if(rand_idx[0] == rand_idx[1]) return; if(rand_idx[0] == rand_idx[2]) return;
if(rand_idx[0] == rand_idx[3]) return; if(rand_idx[1] == rand_idx[2]) return;
if(rand_idx[1] == rand_idx[3]) return; if(rand_idx[2] == rand_idx[3]) return;
    // use the random indices to fetch the 4 point pairs needed to solve the fundamental matrix
for(int i=0; i < 4; i++)
{
_src[i].x = src[rand_idx[i]].x;
_src[i].y = src[rand_idx[i]].y;
_dst[i].x = dst[rand_idx[i]].x;
_dst[i].y = dst[rand_idx[i]].y;
}
    // normalize the fundamental matrix to improve noise robustness
    // compute the fundamental matrix with the 8-point algorithm
int ret = GetFundamental(_src, _dst, F);
    // count the inlier support of this fundamental matrix
inliers[idx] = EvalFundamental(src, dst, pts_num, F, inlier_threshold);
}
| 065cbdb0589bee416f044df2a8cfebce384de062.cu | __global__ void RANSAC_Fundamental(const Point2Df *src, const Point2Df *dst,int pts_num, const int *rand_list, float inlier_threshold, int iterations, int *inliers, float *fundamental)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // return if this thread exceeds the iteration count
if(idx >= iterations) return;
    int rand_idx[4]; // random list indices
    Point2Df _src[4]; // source image points
    Point2Df _dst[4]; // matched points
    // each thread computes one F matrix
float *F = &fundamental[idx*9];
    // fetch the random list indices this thread is responsible for
rand_idx[0] = rand_list[idx*4+0]; rand_idx[1] = rand_list[idx*4+1];
rand_idx[2] = rand_list[idx*4+2]; rand_idx[3] = rand_list[idx*4+3];
    // reject duplicate point pairs
if(rand_idx[0] == rand_idx[1]) return; if(rand_idx[0] == rand_idx[2]) return;
if(rand_idx[0] == rand_idx[3]) return; if(rand_idx[1] == rand_idx[2]) return;
if(rand_idx[1] == rand_idx[3]) return; if(rand_idx[2] == rand_idx[3]) return;
    // use the random indices to fetch the 4 point pairs needed to solve the fundamental matrix
for(int i=0; i < 4; i++)
{
_src[i].x = src[rand_idx[i]].x;
_src[i].y = src[rand_idx[i]].y;
_dst[i].x = dst[rand_idx[i]].x;
_dst[i].y = dst[rand_idx[i]].y;
}
    // normalize the fundamental matrix to improve noise robustness
    // compute the fundamental matrix with the 8-point algorithm
int ret = GetFundamental(_src, _dst, F);
    // count the inlier support of this fundamental matrix
inliers[idx] = EvalFundamental(src, dst, pts_num, F, inlier_threshold);
}
|
0ca5b459c1d9a69720fff5ce7f770071a1089a35.hip | // !!! This is a file automatically generated by hipify!!!
#include <sstream>
#include <string>
#include <cstring>
#include <cstdlib>
#include <fstream>
#include "CDevice.hh"
#include "LaunchCommon.hh" // mkdirp
#include "PLOG.hh"
const plog::Severity CDevice::LEVEL = PLOG::EnvLevel("CDevice", "DEBUG");
const char* CDevice::desc() const
{
std::stringstream ss ;
// uuid is not printable
ss << "CDevice"
<< " index " << index
<< " ordinal " << ordinal
<< " name " << name
<< " major " << major
<< " minor " << minor
<< " compute_capability " << compute_capability
<< " multiProcessorCount " << multiProcessorCount
<< " totalGlobalMem " << totalGlobalMem
;
std::string s = ss.str();
return strdup(s.c_str());
}
bool CDevice::matches(const CDevice& other) const
{
return strncmp(other.uuid, uuid, sizeof(uuid)) == 0 && strncmp(other.name, name, sizeof(name)) == 0;
}
/**
CDevice::Collect
--------------------
**/
void CDevice::Collect(std::vector<CDevice>& devices, bool ordinal_from_index)
{
int devCount;
hipGetDeviceCount(&devCount);
LOG(LEVEL) << "hipGetDeviceCount : " << devCount ;
for (int i = 0; i < devCount; ++i)
{
hipDeviceProp_t p;
hipGetDeviceProperties(&p, i);
CDevice d ;
assert( sizeof(p.name) == sizeof(name) ) ;
strncpy( d.name, p.name, sizeof(p.name) );
assert( sizeof(p.uuid) == sizeof(uuid) );
strncpy( d.uuid, p.uuid.bytes, sizeof(p.uuid) );
d.index = i ;
d.ordinal = ordinal_from_index ? i : -1 ;
d.major = p.major ;
d.minor = p.minor ;
d.compute_capability = p.major*10 + p.minor ;
d.multiProcessorCount = p.multiProcessorCount ;
d.totalGlobalMem = p.totalGlobalMem ;
devices.push_back(d);
}
}
int CDevice::Size()
{
return
sizeof(ordinal) +
sizeof(index) +
sizeof(name) +
sizeof(uuid) +
sizeof(major) +
sizeof(minor) +
sizeof(compute_capability) +
sizeof(multiProcessorCount) +
sizeof(totalGlobalMem) ;
}
void CDevice::write( std::ostream& out ) const
{
int size = Size();
char* buffer = new char[size];
char* p = buffer ;
memcpy( p, &ordinal, sizeof(ordinal) ) ; p += sizeof(ordinal) ;
memcpy( p, &index, sizeof(index) ) ; p += sizeof(index) ;
memcpy( p, name, sizeof(name) ) ; p += sizeof(name) ;
memcpy( p, uuid, sizeof(uuid) ) ; p += sizeof(uuid) ;
memcpy( p, &major, sizeof(major) ) ; p += sizeof(major) ;
memcpy( p, &minor, sizeof(minor) ) ; p += sizeof(minor) ;
memcpy( p, &compute_capability, sizeof(compute_capability) ) ; p += sizeof(compute_capability) ;
memcpy( p, &multiProcessorCount, sizeof(multiProcessorCount) ) ; p += sizeof(multiProcessorCount) ;
memcpy( p, &totalGlobalMem, sizeof(totalGlobalMem) ) ; p += sizeof(totalGlobalMem) ;
out.write(buffer, size);
delete [] buffer ;
}
void CDevice::read( std::istream& in )
{
int size = Size();
char* buffer = new char[size];
in.read(buffer, size);
char* p = buffer ;
memcpy( &ordinal, p, sizeof(ordinal) ) ; p += sizeof(ordinal) ;
memcpy( &index, p, sizeof(index) ) ; p += sizeof(index) ;
memcpy( name, p, sizeof(name) ) ; p += sizeof(name) ;
memcpy( uuid, p, sizeof(uuid) ) ; p += sizeof(uuid) ;
memcpy( &major, p, sizeof(major) ) ; p += sizeof(major) ;
memcpy( &minor, p, sizeof(minor) ) ; p += sizeof(minor) ;
memcpy( &compute_capability, p, sizeof(compute_capability) ) ; p += sizeof(compute_capability) ;
memcpy( &multiProcessorCount,p, sizeof(multiProcessorCount) ) ; p += sizeof(multiProcessorCount) ;
memcpy( &totalGlobalMem, p, sizeof(totalGlobalMem) ) ; p += sizeof(totalGlobalMem) ;
delete [] buffer ;
}
const char* CDevice::CVD = "CUDA_VISIBLE_DEVICES" ;
/**
CDevice::Visible
------------------
This assumes that the ordinal is the index when all GPUs are visible
and it finds this by arranging to persist the query when
CUDA_VISIBLE_DEVICES is not defined and use that to provide something
to match against when the envvar is defined.
Initially tried to do this in one go by changing envvar
and repeating the query. But that doesn't work,
presumably as the CUDA_VISIBLE_DEVICES value only has
any effect once CUDA is initialized.
Of course the disadvantage of this approach
is that one needs to arrange the persisting of all devices
at some initialization time and to find an
appropriate place for the file.
The purpose is for reference running, especially performance
scanning : so its acceptable to require running a metadata
capturing executable prior to scanning.
Possibly NVML can provide a better solution, see nvml-
Actually maybe not : the NVML enumeration order follows nvidia-smi
not CUDA.
**/
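// A possible usage sketch (names and dirpath are hypothetical): run once with
// all GPUs visible so the full list is persisted, then masked runs can map
// visible devices back to their absolute ordinals:
//     std::vector<CDevice> visible ;
//     CDevice::Visible(visible, "/tmp/CDevice", false);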
void CDevice::Visible(std::vector<CDevice>& visible, const char* dirpath, bool nosave)
{
char* cvd = getenv(CVD);
bool no_cvd = cvd == NULL ;
std::vector<CDevice> all ;
bool ordinal_from_index = no_cvd ;
Collect(visible, ordinal_from_index);
if( no_cvd )
{
LOG(LEVEL) << " no_cvd " ;
if(!nosave)
Save( visible, dirpath );
}
else
{
LOG(LEVEL) << " with cvd " << cvd ;
Load(all, dirpath);
for(unsigned i=0 ; i < visible.size() ; i++)
{
CDevice& v = visible[i] ;
v.ordinal = FindIndexOfMatchingDevice( v, all );
}
}
}
int CDevice::FindIndexOfMatchingDevice( const CDevice& d, const std::vector<CDevice>& all )
{
int index = -1 ;
LOG(LEVEL)
<< " d " << d.desc()
<< " all.size " << all.size()
;
for(unsigned i=0 ; i < all.size() ; i++)
{
const CDevice& a = all[i] ;
bool m = a.matches(d) ;
LOG(LEVEL)
<< " a " << a.desc()
<< " m " << m
;
if(m)
{
index = a.index ;
break ;
}
}
LOG(LEVEL) << " index : " << index ;
return index ;
}
void CDevice::Dump( const std::vector<CDevice>& devices, const char* msg )
{
LOG(info) << msg << "[" << Brief(devices) << "]" ;
for(unsigned i=0 ; i < devices.size() ; i++)
{
const CDevice& d = devices[i] ;
LOG(info) << d.desc();
}
}
const char* CDevice::FILENAME = "CDevice.bin" ;
std::string CDevice::Path(const char* dirpath)
{
std::stringstream ss ;
if( dirpath ) ss << dirpath << "/" ;
ss << FILENAME ;
return ss.str();
}
void CDevice::PrepDir(const char* dirpath)
{
mkdirp(dirpath, 0777);
}
void CDevice::Save( const std::vector<CDevice>& devices, const char* dirpath)
{
std::string path = Path(dirpath);
PrepDir(dirpath);
LOG(LEVEL) << "path " << path ;
std::ofstream out(path.c_str(), std::ofstream::binary);
if(out.fail())
{
LOG(error) << " failed open for " << path ;
return ;
}
for(unsigned i = 0 ; i < devices.size() ; ++i )
{
const CDevice& d = devices[i] ;
d.write(out);
}
}
void CDevice::Load( std::vector<CDevice>& devices, const char* dirpath)
{
std::string path = Path(dirpath);
LOG(LEVEL)
<< "dirpath " << dirpath
<< "path " << path
;
std::ifstream in(path.c_str(), std::ofstream::binary);
CDevice d ;
while(true)
{
d.read(in);
if(in.eof()) return ;
if(in.fail())
{
LOG(error) << " failed read from " << path ;
return ;
}
devices.push_back(d);
}
}
std::string CDevice::Brief( const std::vector<CDevice>& devices )
{
std::stringstream ss ;
for(unsigned i=0 ; i < devices.size() ; i++)
{
const CDevice& d = devices[i] ;
ss << d.ordinal << ':' ;
for(unsigned j=0 ; j < strlen(d.name) ; j++)
{
char c = *(d.name+j) ;
ss << ( c == ' ' ? '_' : c ) ;
}
if( i < devices.size() - 1 ) ss << ' ' ;
}
return ss.str();
}
| 0ca5b459c1d9a69720fff5ce7f770071a1089a35.cu |
#include <sstream>
#include <string>
#include <cstring>
#include <cstdlib>
#include <fstream>
#include "CDevice.hh"
#include "LaunchCommon.hh" // mkdirp
#include "PLOG.hh"
const plog::Severity CDevice::LEVEL = PLOG::EnvLevel("CDevice", "DEBUG");
const char* CDevice::desc() const
{
std::stringstream ss ;
// uuid is not printable
ss << "CDevice"
<< " index " << index
<< " ordinal " << ordinal
<< " name " << name
<< " major " << major
<< " minor " << minor
<< " compute_capability " << compute_capability
<< " multiProcessorCount " << multiProcessorCount
<< " totalGlobalMem " << totalGlobalMem
;
std::string s = ss.str();
return strdup(s.c_str());
}
bool CDevice::matches(const CDevice& other) const
{
return strncmp(other.uuid, uuid, sizeof(uuid)) == 0 && strncmp(other.name, name, sizeof(name)) == 0;
}
/**
CDevice::Collect
--------------------
**/
void CDevice::Collect(std::vector<CDevice>& devices, bool ordinal_from_index)
{
int devCount;
cudaGetDeviceCount(&devCount);
LOG(LEVEL) << "cudaGetDeviceCount : " << devCount ;
for (int i = 0; i < devCount; ++i)
{
cudaDeviceProp p;
cudaGetDeviceProperties(&p, i);
CDevice d ;
assert( sizeof(p.name) == sizeof(name) ) ;
strncpy( d.name, p.name, sizeof(p.name) );
assert( sizeof(p.uuid) == sizeof(uuid) );
strncpy( d.uuid, p.uuid.bytes, sizeof(p.uuid) );
d.index = i ;
d.ordinal = ordinal_from_index ? i : -1 ;
d.major = p.major ;
d.minor = p.minor ;
d.compute_capability = p.major*10 + p.minor ;
d.multiProcessorCount = p.multiProcessorCount ;
d.totalGlobalMem = p.totalGlobalMem ;
devices.push_back(d);
}
}
int CDevice::Size()
{
return
sizeof(ordinal) +
sizeof(index) +
sizeof(name) +
sizeof(uuid) +
sizeof(major) +
sizeof(minor) +
sizeof(compute_capability) +
sizeof(multiProcessorCount) +
sizeof(totalGlobalMem) ;
}
void CDevice::write( std::ostream& out ) const
{
int size = Size();
char* buffer = new char[size];
char* p = buffer ;
memcpy( p, &ordinal, sizeof(ordinal) ) ; p += sizeof(ordinal) ;
memcpy( p, &index, sizeof(index) ) ; p += sizeof(index) ;
memcpy( p, name, sizeof(name) ) ; p += sizeof(name) ;
memcpy( p, uuid, sizeof(uuid) ) ; p += sizeof(uuid) ;
memcpy( p, &major, sizeof(major) ) ; p += sizeof(major) ;
memcpy( p, &minor, sizeof(minor) ) ; p += sizeof(minor) ;
memcpy( p, &compute_capability, sizeof(compute_capability) ) ; p += sizeof(compute_capability) ;
memcpy( p, &multiProcessorCount, sizeof(multiProcessorCount) ) ; p += sizeof(multiProcessorCount) ;
memcpy( p, &totalGlobalMem, sizeof(totalGlobalMem) ) ; p += sizeof(totalGlobalMem) ;
out.write(buffer, size);
delete [] buffer ;
}
void CDevice::read( std::istream& in )
{
int size = Size();
char* buffer = new char[size];
in.read(buffer, size);
char* p = buffer ;
memcpy( &ordinal, p, sizeof(ordinal) ) ; p += sizeof(ordinal) ;
memcpy( &index, p, sizeof(index) ) ; p += sizeof(index) ;
memcpy( name, p, sizeof(name) ) ; p += sizeof(name) ;
memcpy( uuid, p, sizeof(uuid) ) ; p += sizeof(uuid) ;
memcpy( &major, p, sizeof(major) ) ; p += sizeof(major) ;
memcpy( &minor, p, sizeof(minor) ) ; p += sizeof(minor) ;
memcpy( &compute_capability, p, sizeof(compute_capability) ) ; p += sizeof(compute_capability) ;
memcpy( &multiProcessorCount,p, sizeof(multiProcessorCount) ) ; p += sizeof(multiProcessorCount) ;
memcpy( &totalGlobalMem, p, sizeof(totalGlobalMem) ) ; p += sizeof(totalGlobalMem) ;
delete [] buffer ;
}
const char* CDevice::CVD = "CUDA_VISIBLE_DEVICES" ;
/**
CDevice::Visible
------------------
This assumes that the ordinal is the index when all GPUs are visible
and it finds this by arranging to persist the query when
CUDA_VISIBLE_DEVICES is not defined and use that to provide something
to match against when the envvar is defined.
Initially tried to do this in one go by changing envvar
and repeating the query. But that doesn't work,
presumably as the CUDA_VISIBLE_DEVICES value only has
any effect once CUDA is initialized.
Of course the disadvantage of this approach
is that one needs to arrange the persisting of all devices
at some initialization time and to find an
appropriate place for the file.
The purpose is for reference running, especially performance
scanning : so its acceptable to require running a metadata
capturing executable prior to scanning.
Possibly NVML can provide a better solution, see nvml-
Actually maybe not : the NVML enumeration order follows nvidia-smi
not CUDA.
**/
void CDevice::Visible(std::vector<CDevice>& visible, const char* dirpath, bool nosave)
{
char* cvd = getenv(CVD);
bool no_cvd = cvd == NULL ;
std::vector<CDevice> all ;
bool ordinal_from_index = no_cvd ;
Collect(visible, ordinal_from_index);
if( no_cvd )
{
LOG(LEVEL) << " no_cvd " ;
if(!nosave)
Save( visible, dirpath );
}
else
{
LOG(LEVEL) << " with cvd " << cvd ;
Load(all, dirpath);
for(unsigned i=0 ; i < visible.size() ; i++)
{
CDevice& v = visible[i] ;
v.ordinal = FindIndexOfMatchingDevice( v, all );
}
}
}
int CDevice::FindIndexOfMatchingDevice( const CDevice& d, const std::vector<CDevice>& all )
{
int index = -1 ;
LOG(LEVEL)
<< " d " << d.desc()
<< " all.size " << all.size()
;
for(unsigned i=0 ; i < all.size() ; i++)
{
const CDevice& a = all[i] ;
bool m = a.matches(d) ;
LOG(LEVEL)
<< " a " << a.desc()
<< " m " << m
;
if(m)
{
index = a.index ;
break ;
}
}
LOG(LEVEL) << " index : " << index ;
return index ;
}
void CDevice::Dump( const std::vector<CDevice>& devices, const char* msg )
{
LOG(info) << msg << "[" << Brief(devices) << "]" ;
for(unsigned i=0 ; i < devices.size() ; i++)
{
const CDevice& d = devices[i] ;
LOG(info) << d.desc();
}
}
const char* CDevice::FILENAME = "CDevice.bin" ;
std::string CDevice::Path(const char* dirpath)
{
std::stringstream ss ;
if( dirpath ) ss << dirpath << "/" ;
ss << FILENAME ;
return ss.str();
}
void CDevice::PrepDir(const char* dirpath)
{
mkdirp(dirpath, 0777);
}
void CDevice::Save( const std::vector<CDevice>& devices, const char* dirpath)
{
std::string path = Path(dirpath);
PrepDir(dirpath);
LOG(LEVEL) << "path " << path ;
std::ofstream out(path.c_str(), std::ofstream::binary);
if(out.fail())
{
LOG(error) << " failed open for " << path ;
return ;
}
for(unsigned i = 0 ; i < devices.size() ; ++i )
{
const CDevice& d = devices[i] ;
d.write(out);
}
}
void CDevice::Load( std::vector<CDevice>& devices, const char* dirpath)
{
std::string path = Path(dirpath);
LOG(LEVEL)
<< "dirpath " << dirpath
<< "path " << path
;
std::ifstream in(path.c_str(), std::ofstream::binary);
CDevice d ;
while(true)
{
d.read(in);
if(in.eof()) return ;
if(in.fail())
{
LOG(error) << " failed read from " << path ;
return ;
}
devices.push_back(d);
}
}
std::string CDevice::Brief( const std::vector<CDevice>& devices )
{
std::stringstream ss ;
for(unsigned i=0 ; i < devices.size() ; i++)
{
const CDevice& d = devices[i] ;
ss << d.ordinal << ':' ;
for(unsigned j=0 ; j < strlen(d.name) ; j++)
{
char c = *(d.name+j) ;
ss << ( c == ' ' ? '_' : c ) ;
}
if( i < devices.size() - 1 ) ss << ' ' ;
}
return ss.str();
}
|
92dbc76142581a71668525246445b9416c437bfb.hip | // !!! This is a file automatically generated by hipify!!!
#include "test.h"
#include "book.h"
#include "hip/hip_runtime.h"
//mean filter GPU**************************************************************************************//
__global__ void ave_process(unsigned char* dev_original,unsigned char *dev_out,int R,int C){
int x=threadIdx.x;
int y=blockIdx.x;
int offset=x+y*blockDim.x;
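    // grid-stride loop: each thread filters every (blockDim.x*gridDim.x)-th pixel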
while(offset<R*C){
int dev_y[NKsize*NKsize];
int gx=offset/C;
int gy=offset%C;
int sum=0;
int fx=gx-NKsize/2;
int fy=gy-NKsize/2;
for(int i=0;i<NKsize;i++){
for(int j=0;j<NKsize;j++){
if((fx+i)<0||(fy+j)<0||(fx+i)>=R||(fy+j)>=C){
dev_y[i*NKsize+j]=dev_original[offset];
}
else{
dev_y[i*NKsize+j]=dev_original[(fx+i)*C+(fy+j)];
}
sum+=dev_y[i*NKsize+j];
}
}
dev_out[offset]=sum/(NKsize*NKsize);
// dev_out[offset]=dev_original[offset];
offset+=blockDim.x*gridDim.x;
}
}
//median filter GPU**************************************************************************************//
__global__ void mid_process(unsigned char *dev_original,unsigned char *dev_out,int R,int C){
int x=threadIdx.x;
int y=blockIdx.x;
int offset=x+y*blockDim.x;
while(offset<R*C){
int dev_y[NKsize*NKsize];
int gx=offset/C;
int gy=offset%C;
int fx=gx-NKsize/2;
int fy=gy-NKsize/2;
for(int i=0;i<NKsize;i++){
for(int j=0;j<NKsize;j++){
if((fx+i)<0||(fy+j)<0||(fx+i)>=R||(fy+j)>=C){
dev_y[i*NKsize+j]=dev_original[offset];
}
else{
dev_y[i*NKsize+j]=dev_original[(fx+i)*C+(fy+j)];
}
}
}
int temp;
for(int i=0;i<NKsize*NKsize-1;i++){
for(int j=0;j<NKsize*NKsize-1-i;j++){
if(dev_y[j]>dev_y[j+1]){
temp=dev_y[j];
dev_y[j]=dev_y[j+1];
dev_y[j+1]=temp;
}
}
}
dev_out[offset]=dev_y[NKsize*NKsize/2];
offset+=blockDim.x*gridDim.x;
}
}
//classical kernel regression**************************************************************************************//
float Kcal(int x1,int y1,int x0,int y0) {
float distance=(x1-x0)*(x1-x0)+(y1-y0)*(y1-y0);
float h=1;
float t=distance/h;
float xishu=1/((2*pai));
// float zhishu=(-0.5)*((x1-x0)*(x1-x0)+(y1-y0)*(y1-y0));
float r=xishu*exp(-0.5*t*t);
return r;
}
void init_Wcal(float *W) {
int offset;
int x0=Ksize/2;
int y0=Ksize/2;
for(int i=0; i<Ksize*Ksize; i++) {
for(int j=0; j<Ksize*Ksize; j++) {
if(i==j) {
int fx=i/Ksize;
int fy=i%Ksize;
W[i*Ksize*Ksize+j]=Kcal(fx,fy,x0,y0);
} else {
W[i*Ksize*Ksize+j]=0;
}
}
}
}
//matrix multiplication on the GPU
__device__ void Mult(float *c,float* a,float *b,int a_r,int a_c,int b_r,int b_c){
for(int i=0;i<a_r;i++){
for(int j=0;j<b_c;j++){
c[i*b_c+j]=0;
for(int k=0;k<a_c;k++){
c[i*b_c+j]+=a[i*a_c+k]*b[k*b_c+j];
}
}
}
}
__global__ void cla_process(unsigned char *dev_original,unsigned char *dev_out,float *dev_mid,int R,int C){
int x=blockIdx.x;
int y=threadIdx.x;
int offset=y+x*blockDim.x;
while(offset<R*C){
float dev_y[Ksize*Ksize];
float dev_res[3*1];
int fx=offset/C-Ksize/2;
int fy=offset%C-Ksize/2;
for(int i=0;i<Ksize;i++){
for(int j=0;j<Ksize;j++){
if((fx+i)<0||(fy+j)<0||(fx+i)>=R||(fy+j)>=C)
dev_y[i*Ksize+j]=dev_original[offset];
else
dev_y[i*Ksize+j]=dev_original[(fx+i)*C+(fy+j)];
}
}
Mult(dev_res,dev_mid,dev_y,3,Ksize*Ksize,Ksize*Ksize,1);
float value=dev_res[0];
// value/=1.3;
int grayvalue=(int )value;
if(grayvalue>255){
grayvalue=255;
}
else if(grayvalue<0){
grayvalue=0;
}
dev_out[offset]=(unsigned char)grayvalue;
offset=offset+blockDim.x*gridDim.x;
}
}
//adaptive kernel regression**************************************************************************************//
__device__ float Kself(int x1,int y1,int x0,int y0,float *C){
float h=4;
float distance=(x1-x0)*(x1-x0)*C[0]+(x1-x0)*(y1-y0)*C[2]+(x1-x0)*(y1-y0)*C[1]+(y1-y0)*(y1-y0)*C[3];
float xishu=sqrt(C[0]*C[3]-C[1]*C[2])/(2*pai*h*h);
float r=xishu*exp(-0.5*distance/(h*h));
return r;
}
//initialize the W matrix (Ksize2 x Ksize2)
__device__ void init_Wself(float* W,float *C){
int offset;
int x0=Ksize/2;
int y0=Ksize/2;
for(int i=0;i<Ksize*Ksize;i++){
for(int j=0;j<Ksize*Ksize;j++){
if(i==j){
int fx=i/Ksize;
int fy=i%Ksize;
W[i*Ksize*Ksize+j]=Kself(fx,fy,x0,y0,C);
}
else{
W[i*Ksize*Ksize+j]=0;
}
}
}
}
//initialize the gradient matrix
__device__ void init_Gself(float *G,float *y){
int fx=Ksize/2;
int fy=Ksize/2;
int num=0;
for(int i=0;i<Ksize;i++){
for(int j=0;j<Ksize;j++){
if(i!=fx){
G[num*2]=(y[i*Ksize+j]-y[fx*Ksize+fy])/(i-fx);
}
else{
G[num*2]=0;
}
if(j!=fy){
G[num*2+1]=(y[i*Ksize+j]-y[fx*Ksize+fy])/(j-fy);
}
else{
G[num*2+1]=0;
}
/* if(i+1>=Ksize){
G[num*2]=0;
}
else{
G[num*2]=(y[(i+1)*Ksize+j]-y[i*Ksize+j]);
}
if(j+1>=Ksize){
G[num*2+1]=0;
}
else{
G[num*2+1]=y[i*Ksize+j+1]-y[i*Ksize+j];
}*/
num++;
}
}
}
__device__ void pppself(float a[],float e[],float s[],float v[],int m,int n) {
int i,j,p,q;
double d;
if(m>=n)
i=n;
else
i=m;
for(j=1; j<=i-1; j++) {
a[(j-1)*n+j-1]=s[j-1];
a[(j-1)*n+j]=e[j-1];
}
a[(i-1)*n+i-1]=s[i-1];
if(m<n)
a[(i-1)*n+i]=e[i-1];
for(i=1; i<=n-1; i++)
for(j=i+1; j<=n; j++) {
p=(i-1)*n+j-1;
q=(j-1)*n+i-1;
d=v[p];
v[p]=v[q];
v[q]=d;
}
return;
}
__device__ void sssself(float fg[2],float cs[2]) {
float r,d;
//if((fabs(fg[0])+fabs(fg[1]))==0.0)
if((fabs(fg[0])+fabs(fg[1]))<MIN_DOUBLE) {
cs[0]=1.0;
cs[1]=0.0;
d=0.0;
} else {
d=sqrt(fg[0]*fg[0]+fg[1]*fg[1]);
if(fabs(fg[0])>fabs(fg[1])) {
d=fabs(d);
if(fg[0]<0.0)
d=-d;
}
if(fabs(fg[1])>=fabs(fg[0])) {
d=fabs(d);
if(fg[1]<0.0)
d=-d;
}
cs[0]=fg[0]/d;
cs[1]=fg[1]/d;
}
r=1.0;
if(fabs(fg[0])>fabs(fg[1]))
r=cs[1];
else
//if(cs[0]!=0.0)
if(fabs(cs[0])>MIN_DOUBLE)
r=1.0/cs[0];
fg[0]=d;
fg[1]=r;
return;
}
//singular value decomposition of the matrix
__device__ int dluavself(float a[],int m,int n,float u[],float v[],float eps,int ka){
int i,j,k,l,it,ll,kk,ix,iy,mm,nn,iz,ml,ks;
float d,dd,t,sm,sml,eml,sk,ek,b,c,shh,fg[2],cs[2];
float s[26];
float w[26];
float e[26];
for(i=1; i<=m; i++) {
ix=(i-1)*m+i-1;
u[ix]=0;
}
for(i=1; i<=n; i++) {
iy=(i-1)*n+i-1;
v[iy]=0;
}
it=MAX_ITERA;
k=n;
if(m-1<n)
k=m-1;
l=m;
if(n-2<m) l=n-2;
if(l<0) l=0;
ll=k;
if(l>k) ll=l;
if(ll>=1) {
for(kk=1; kk<=ll; kk++) {
if(kk<=k) {
d=0.0;
for(i=kk; i<=m; i++) {
ix=(i-1)*n+kk-1;
d=d+a[ix]*a[ix];
}
s[kk-1]=sqrt(d);
//if(s[kk-1]!=0.0)
if(fabs(s[kk-1])>MIN_DOUBLE) {
ix=(kk-1)*n+kk-1;
//if(a[ix]!=0.0)
if(fabs(a[ix])>MIN_DOUBLE) {
s[kk-1]=fabs(s[kk-1]);
if(a[ix]<0.0) s[kk-1]=-s[kk-1];
}
for(i=kk; i<=m; i++) {
iy=(i-1)*n+kk-1;
a[iy]=a[iy]/s[kk-1];
}
a[ix]=1.0+a[ix];
}
s[kk-1]=-s[kk-1];
}
if(n>=kk+1) {
for(j=kk+1; j<=n; j++) {
//if((kk<=k)&&(s[kk-1]!=0.0))
if((kk<=k)&&(fabs(s[kk-1])>MIN_DOUBLE)) {
d=0.0;
for(i=kk; i<=m; i++) {
ix=(i-1)*n+kk-1;
iy=(i-1)*n+j-1;
d=d+a[ix]*a[iy];
}
d=-d/a[(kk-1)*n+kk-1];
for(i=kk; i<=m; i++) {
ix=(i-1)*n+j-1;
iy=(i-1)*n+kk-1;
a[ix]=a[ix]+d*a[iy];
}
}
e[j-1]=a[(kk-1)*n+j-1];
}
}
if(kk<=k) {
for(i=kk; i<=m; i++) {
ix=(i-1)*m+kk-1;
iy=(i-1)*n+kk-1;
u[ix]=a[iy];
}
}
if(kk<=l) {
d=0.0;
for(i=kk+1; i<=n; i++)
d=d+e[i-1]*e[i-1];
e[kk-1]=sqrt(d);
//if(e[kk-1]!=0.0)
if(fabs(e[kk-1])>MIN_DOUBLE) {
//if(e[kk]!=0.0)
if(fabs(e[kk])>MIN_DOUBLE) {
e[kk-1]=fabs(e[kk-1]);
if(e[kk]<0.0)
e[kk-1]=-e[kk-1];
}
for(i=kk+1; i<=n; i++)
e[i-1]=e[i-1]/e[kk-1];
e[kk]=1.0+e[kk];
}
e[kk-1]=-e[kk-1];
//if((kk+1<=m)&&(e[kk-1]!=0.0))
if((kk+1<=m)&&(fabs(e[kk-1])>MIN_DOUBLE)) {
for(i=kk+1; i<=m; i++) w[i-1]=0.0;
for(j=kk+1; j<=n; j++)
for(i=kk+1; i<=m; i++)
w[i-1]=w[i-1]+e[j-1]*a[(i-1)*n+j-1];
for(j=kk+1; j<=n; j++)
for(i=kk+1; i<=m; i++) {
ix=(i-1)*n+j-1;
a[ix]=a[ix]-w[i-1]*e[j-1]/e[kk];
}
}
for(i=kk+1; i<=n; i++)
v[(i-1)*n+kk-1]=e[i-1];
}
}
}
mm=n;
if(m+1<n) mm=m+1;
if(k<n) s[k]=a[k*n+k];
if(m<mm) s[mm-1]=0.0;
if(l+1<mm) e[l]=a[l*n+mm-1];
e[mm-1]=0.0;
nn=m;
if(m>n) nn=n;
if(nn>=k+1) {
for(j=k+1; j<=nn; j++) {
for(i=1; i<=m; i++)
u[(i-1)*m+j-1]=0.0;
u[(j-1)*m+j-1]=1.0;
}
}
if(k>=1) { /////////////////////////////////
for(ll=1; ll<=k; ll++) {
kk=k-ll+1;
iz=(kk-1)*m+kk-1;
//if(s[kk-1]!=0.0)
if(fabs(s[kk-1])>MIN_DOUBLE) {
if(nn>=kk+1)
for(j=kk+1; j<=nn; j++) {
d=0.0;
for(i=kk; i<=m; i++) {
ix=(i-1)*m+kk-1;
iy=(i-1)*m+j-1;
d=d+u[ix]*u[iy]/u[iz];
}
d=-d;
for(i=kk; i<=m; i++) {
ix=(i-1)*m+j-1;
iy=(i-1)*m+kk-1;
u[ix]=u[ix]+d*u[iy];
}
}
for(i=kk; i<=m; i++) {
ix=(i-1)*m+kk-1;
u[ix]=-u[ix];
}
u[iz]=1.0+u[iz];
if(kk-1>=1)//////////////////////////////////////
for(i=1; i<=kk-1; i++)
u[(i-1)*m+kk-1]=0.0;
} else {
for(i=1; i<=m; i++)
u[(i-1)*m+kk-1]=0.0;
u[(kk-1)*m+kk-1]=1.0;
}
}
}
for(ll=1; ll<=n; ll++) {
kk=n-ll+1;
iz=kk*n+kk-1;
//if((kk<=l)&&(e[kk-1]!=0.0))/////////////////////////////
if((kk<=l)&&(fabs(e[kk-1])>MIN_DOUBLE)) {
for(j=kk+1; j<=n; j++) {
d=0.0;
for(i=kk+1; i<=n; i++) {
ix=(i-1)*n+kk-1;
iy=(i-1)*n+j-1;
d=d+v[ix]*v[iy]/v[iz];
}
d=-d;
for(i=kk+1; i<=n; i++) {
ix=(i-1)*n+j-1;
iy=(i-1)*n+kk-1;
v[ix]=v[ix]+d*v[iy];
}
}
}
for(i=1; i<=n; i++)
v[(i-1)*n+kk-1]=0.0;
v[iz-n]=1.0;
}
for(i=1; i<=m; i++)
for(j=1; j<=n; j++)
a[(i-1)*n+j-1]=0.0;
ml=mm;
it=MAX_ITERA;
while(1==1) { //////////////////////////////////
if(mm==0) {
pppself(a,e,s,v,m,n);
// free(s);free(e);free(w);
return l;
}
if(it==0) {
pppself(a,e,s,v,m,n);
// free(s);free(e);free(w);
return -1;
}
kk=mm-1;
//while((kk!=0)&&(fabs(e[kk-1])!=0.0))
while((kk!=0)&&(fabs(e[kk-1])>MIN_DOUBLE)) {
d=fabs(s[kk-1])+fabs(s[kk]);
dd=fabs(e[kk-1]);
if(dd>eps*d)
kk=kk-1;
else
e[kk-1]=0.0;
}
if(kk==mm-1) {
kk=kk+1;
if(s[kk-1]<0.0) {
s[kk-1]=-s[kk-1];
for(i=1; i<=n; i++) {
ix=(i-1)*n+kk-1;
v[ix]=-v[ix];
}
}
while((kk!=ml)&&(s[kk-1]<s[kk])) {
d=s[kk-1];
s[kk-1]=s[kk];
s[kk]=d;
if(kk<n)
for(i=1; i<=n; i++) {
ix=(i-1)*n+kk-1;
iy=(i-1)*n+kk;
d=v[ix];
v[ix]=v[iy];
v[iy]=d;
}
if(kk<m)
for(i=1; i<=m; i++) {
ix=(i-1)*m+kk-1;
iy=(i-1)*m+kk;
d=u[ix];
u[ix]=u[iy];
u[iy]=d;
}
kk=kk+1;
}
it=MAX_ITERA;
mm=mm-1;
} else {
ks=mm;
//while((ks>kk)&&(fabs(s[ks-1])!=0.0))
while((ks>kk)&&(fabs(s[ks-1])>MIN_DOUBLE)) {
d=0.0;
if(ks!=mm)
d=d+fabs(e[ks-1]);
if(ks!=kk+1) d=d+fabs(e[ks-2]);
dd=fabs(s[ks-1]);
if(dd>eps*d)
ks=ks-1;
else
s[ks-1]=0.0;
}
if(ks==kk) {
kk=kk+1;
d=fabs(s[mm-1]);
t=fabs(s[mm-2]);
if(t>d)
d=t;
t=fabs(e[mm-2]);
if(t>d)
d=t;
t=fabs(s[kk-1]);
if(t>d)
d=t;
t=fabs(e[kk-1]);
if(t>d)
d=t;
sm=s[mm-1]/d;
sml=s[mm-2]/d;
eml=e[mm-2]/d;
sk=s[kk-1]/d;
ek=e[kk-1]/d;
b=((sml+sm)*(sml-sm)+eml*eml)/2.0;
c=sm*eml;
c=c*c;
shh=0.0;
//if((b!=0.0)||(c!=0.0))
if((fabs(b)>MIN_DOUBLE)||(fabs(c)>MIN_DOUBLE)) {
shh=sqrt(b*b+c);
if(b<0.0)
shh=-shh;
shh=c/(b+shh);
}
fg[0]=(sk+sm)*(sk-sm)-shh;
fg[1]=sk*ek;
for(i=kk; i<=mm-1; i++) {
sssself(fg,cs);
if(i!=kk)
e[i-2]=fg[0];
fg[0]=cs[0]*s[i-1]+cs[1]*e[i-1];
e[i-1]=cs[0]*e[i-1]-cs[1]*s[i-1];
fg[1]=cs[1]*s[i];
s[i]=cs[0]*s[i];
//if((cs[0]!=1.0)||(cs[1]!=0.0))
if((fabs(cs[0]-1.0)>MIN_DOUBLE)||(fabs(cs[1])>MIN_DOUBLE))
for(j=1; j<=n; j++) {
ix=(j-1)*n+i-1;
iy=(j-1)*n+i;
d=cs[0]*v[ix]+cs[1]*v[iy];
v[iy]=-cs[1]*v[ix]+cs[0]*v[iy];
v[ix]=d;
}
sssself(fg,cs);
s[i-1]=fg[0];
fg[0]=cs[0]*e[i-1]+cs[1]*s[i];
s[i]=-cs[1]*e[i-1]+cs[0]*s[i];
fg[1]=cs[1]*e[i];
e[i]=cs[0]*e[i];
if(i<m)
//if((cs[0]!=1.0)||(cs[1]!=0.0))
if((fabs(cs[0]-1.0)>MIN_DOUBLE)||(fabs(cs[1])>MIN_DOUBLE))
for(j=1; j<=m; j++) {
ix=(j-1)*m+i-1;
iy=(j-1)*m+i;
d=cs[0]*u[ix]+cs[1]*u[iy];
u[iy]=-cs[1]*u[ix]+cs[0]*u[iy];
u[ix]=d;
}
}
e[mm-2]=fg[0];
it=it-1;
} else {
if(ks==mm) {
kk=kk+1;
fg[1]=e[mm-2];
e[mm-2]=0.0;
for(ll=kk; ll<=mm-1; ll++) {
i=mm+kk-ll-1;
fg[0]=s[i-1];
sssself(fg,cs);
s[i-1]=fg[0];
if(i!=kk) {
fg[1]=-cs[1]*e[i-2];
e[i-2]=cs[0]*e[i-2];
}
//if((cs[0]!=1.0)||(cs[1]!=0.0))
if((fabs(cs[0]-1.0)>MIN_DOUBLE)||(fabs(cs[1])>MIN_DOUBLE))
for(j=1; j<=n; j++) {
ix=(j-1)*n+i-1;
iy=(j-1)*n+mm-1;
d=cs[0]*v[ix]+cs[1]*v[iy];
v[iy]=-cs[1]*v[ix]+cs[0]*v[iy];
v[ix]=d;
}
}
} else {
kk=ks+1;
fg[1]=e[kk-2];
e[kk-2]=0.0;
for(i=kk; i<=mm; i++) {
fg[0]=s[i-1];
sssself(fg,cs);
s[i-1]=fg[0];
fg[1]=-cs[1]*e[i-1];
e[i-1]=cs[0]*e[i-1];
//if((cs[0]!=1.0)||(cs[1]!=0.0))
if((fabs(cs[0]-1.0)>MIN_DOUBLE)||(fabs(cs[1])>MIN_DOUBLE))
for(j=1; j<=m; j++) {
ix=(j-1)*m+i-1;
iy=(j-1)*m+kk-2;
d=cs[0]*u[ix]+cs[1]*u[iy];
u[iy]=-cs[1]*u[ix]+cs[0]*u[iy];
u[ix]=d;
}
}
}
}
}
}
    // s, e, w are stack arrays here, so there is nothing to free
    return l;
}
//3x3 matrix inverse on the GPU (adjugate divided by determinant)
__device__ void Inverse(float *a){
float A=a[0]*a[4]*a[8]+a[1]*a[5]*a[6]+a[2]*a[3]*a[7]-a[2]*a[4]*a[6]-a[1]*a[3]*a[8]-a[0]*a[5]*a[7];
float b[9];
b[0]=a[4]*a[8]-a[7]*a[5];
b[1]=-1*(a[1]*a[8]-a[7]*a[2]);
b[2]=a[1]*a[5]-a[2]*a[4];
b[3]=a[5]*a[6]-a[8]*a[3];
b[4]=-1*(a[2]*a[6]-a[8]*a[0]);
b[5]=a[2]*a[3]-a[0]*a[5];
b[6]=a[3]*a[7]-a[6]*a[4];
b[7]=-1*(a[0]*a[7]-a[6]*a[1]);
b[8]=a[0]*a[4]-a[1]*a[3];
for(int i=0; i<9; i++) {
b[i]=(float)b[i]/A;
a[i]=b[i];
}
}
__device__ float computeself(float *X,float *X_t,float *W,float *y){
float WW[3*Ksize*Ksize];
float XX[3*3];
float YY[3*1];
float XXWW[3*Ksize*Ksize];
float XXy[3*1];
    // weighted least-squares estimate: (X^T W X)^{-1} X^T W y; return the constant term
Mult(WW,X_t,W,3,Ksize*Ksize,Ksize*Ksize,Ksize*Ksize);
Mult(XX,WW,X,3,Ksize*Ksize,Ksize*Ksize,3);
Inverse(XX);
Mult(XXWW,XX,WW,3,3,3,Ksize*Ksize);
Mult(XXy,XXWW,y,3,Ksize*Ksize,Ksize*Ksize,1);
return (XXy[0]);
}
__constant__ float XXX[3*Ksize*Ksize];
__constant__ float XXXT[3*Ksize*Ksize];
__global__ void self_process(unsigned char *dev_original,unsigned char* dev_out,float *dev_X_t,float *dev_X,int R,int C){
int x=threadIdx.x;
int y=blockIdx.x;
int offset=x+y*blockDim.x;
while(offset<R*C){
float dev_y[Ksize*Ksize];
float dev_W[Ksize*Ksize*Ksize*Ksize];
float G[Ksize*Ksize*2];
float U[Ksize*Ksize*Ksize*Ksize];
float V[2*2];
float eps=0.000001;
int fx=offset/C-Ksize/2;
int fy=offset%C-Ksize/2;
for(int i=0;i<Ksize;i++){
for(int j=0;j<Ksize;j++){
if((fx+i)<0||(fy+j)<0||(fx+i)>=R||(fy+j)>=C)
dev_y[i*Ksize+j]=dev_original[offset];
else
dev_y[i*Ksize+j]=dev_original[(fx+i)*C+(fy+j)];
}
}
init_Gself(G,dev_y);
int re=dluavself(G,Ksize*Ksize,2,U,V,eps,Ksize*Ksize+1);
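        // SVD of the local gradient matrix: the right singular vectors V give
        // the dominant edge orientation (sita) and the singular values (s1, s2,
        // returned on G's diagonal) its strength; together they define the
        // 2x2 steering covariance C below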
float sita=atan(V[1]/V[3]);
float gama;
float deta;
float C[4];
float s1;
float s2;
s1=G[0];
s2=G[3];
gama=sqrt((s1*s2+1)/25);
deta=(s1+1)/(s2+1);
C[0]=gama*(deta*cos(sita)*cos(sita)+1/deta*sin(sita)*sin(sita));
C[1]=gama*(1/deta*sin(sita)*cos(sita)-deta*cos(sita)*sin(sita));
C[2]=C[1];
C[3]=gama*(deta*sin(sita)*sin(sita)+1/deta*cos(sita)*cos(sita));
init_Wself(dev_W,C);
float value=computeself(XXX,XXXT,dev_W,dev_y);
// value/=1.1;
if(value>255){
value=255;
}
else if(value<0){
value=0;
}
// value/=1.3;
dev_out[offset]=(unsigned char)value;
offset=offset+gridDim.x*blockDim.x;
}
}
//**************************************************************************************//
extern "C" float Test_GPU(string name,int choice){
int liR=512;
int liC=512;
hipEvent_t start,end;
HANDLE_ERROR(hipEventCreate(&start));
HANDLE_ERROR(hipEventCreate(&end));
HANDLE_ERROR(hipEventRecord(start,0));
unsigned char *original;
unsigned char *out;
unsigned char *dev_original;
unsigned char *dev_out;
unsigned char *temp_original;
unsigned char *temp_out;
temp_original=(unsigned char*)malloc(liR*liC*sizeof(unsigned char));
temp_out=(unsigned char*)malloc(liR*liC*sizeof(unsigned char));
float *X;
float *X_t;
float *W;
float *dev_mid;
float *XX;
float *WW;
float *Mid;
float *dev_X_t;
float *dev_X;
Mat srcImage=imread(name);
cvtColor(srcImage, srcImage, CV_BGR2GRAY);
if(srcImage.channels()!=1)
return srcImage.channels();
Mat dstImage=srcImage.clone();
int R= srcImage.rows;
int C= srcImage.cols;
original=(unsigned char*)malloc(R*C*sizeof(unsigned char));
for(int i=0;i<R;i++){
for(int j=0;j<C;j++){
original[i*C+j]=srcImage.at<uchar>(i,j);
}
}
out=(unsigned char*)malloc(R*C*sizeof(unsigned char));
X=(float*)malloc(3*Ksize*Ksize*sizeof(float));
X_t=(float*)malloc(3*Ksize*Ksize*sizeof(float));
W=(float*)malloc(Ksize*Ksize*Ksize*Ksize*sizeof(float));
XX=(float*)malloc(sizeof(float)*3*Ksize*Ksize);
WW=(float*)malloc(sizeof(float)*3*Ksize*Ksize);
Mid=(float*)malloc(sizeof(float)*3*Ksize*Ksize);
init_X(X);
init_X_t(X_t);
init_Wcal(W);
HANDLE_ERROR(hipMalloc((void**)&dev_original,R*C*sizeof(unsigned char)));
HANDLE_ERROR(hipMalloc((void**)&dev_out,R*C*sizeof(unsigned char)));
//for classical
HANDLE_ERROR(hipMalloc((void**)&dev_mid,3*Ksize*Ksize*sizeof(float)));
mult(WW,X_t,W,3,Ksize*Ksize,Ksize*Ksize,Ksize*Ksize);
mult(XX,WW,X,3,Ksize*Ksize,Ksize*Ksize,3);
//for self
HANDLE_ERROR(hipMalloc((void**)&dev_X,3*Ksize*Ksize*Ksize*sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&dev_X_t,3*Ksize*Ksize*sizeof(float)));
HANDLE_ERROR(hipMemcpy(dev_X,X,3*Ksize*Ksize*sizeof(float),hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_X_t,X_t,3*Ksize*Ksize*sizeof(float),hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpyToSymbol(XXX,X,3*Ksize*Ksize*sizeof(float)));
HANDLE_ERROR(hipMemcpyToSymbol(XXXT,X_t,3*Ksize*Ksize*sizeof(float)));
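    // precompute Mid = (X^T W X)^{-1} X^T W on the host so the classical
    // kernel needs only one small matrix-vector product per pixel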
if(inverse(XX)!=0)
mult(Mid,XX,WW,3,3,3,Ksize*Ksize);
HANDLE_ERROR(hipMemcpy(dev_mid,Mid,3*Ksize*Ksize*sizeof(float),hipMemcpyHostToDevice));
if(choice==3){
HANDLE_ERROR(hipMemcpy(dev_original,original,R*C*sizeof(unsigned char),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( ave_process), dim3(DIM),dim3(DIM), 0, 0, dev_original,dev_out,R,C);
HANDLE_ERROR(hipMemcpy(out,dev_out,R*C*sizeof(unsigned char),hipMemcpyDeviceToHost));
}
else if(choice==4){
HANDLE_ERROR(hipMemcpy(dev_original,original,R*C*sizeof(unsigned char),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( mid_process), dim3(DIM),dim3(DIM), 0, 0, dev_original,dev_out,R,C);
HANDLE_ERROR(hipMemcpy(out,dev_out,R*C*sizeof(unsigned char),hipMemcpyDeviceToHost));
}
else if(choice==1){
HANDLE_ERROR(hipMemcpy(dev_original,original,R*C*sizeof(unsigned char),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( cla_process), dim3(DIM/2),dim3(DIM/2), 0, 0, dev_original,dev_out,dev_mid,R,C);
HANDLE_ERROR(hipMemcpy(out,dev_out,R*C*sizeof(unsigned char),hipMemcpyDeviceToHost));
}
else{
        int numR=(R+liR-1)/liR;
        int numC=(C+liC-1)/liC;
for(int i=0;i<numR;i++){
for(int j=0;j<numC;j++){
int off=i*liR*C+j*liC;
int tempR=liR;
int tempC=liC;
if(i==numR-1)
tempR=R-liR*i;
if(j==numC-1)
tempC=C-liC*j;
for(int i=0;i<tempR;i++){
for(int j=0;j<tempC;j++){
temp_original[i*tempC+j]=original[off+i*C+j];
}
}
HANDLE_ERROR(hipMemcpy(dev_original,temp_original,tempR*tempC*sizeof(unsigned char),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( self_process), dim3(DIM/2),dim3(DIM/2), 0, 0, dev_original,dev_out,dev_X_t,dev_X,tempR,tempC);
HANDLE_ERROR(hipMemcpy(temp_out,dev_out,tempR*tempC*sizeof(unsigned char),hipMemcpyDeviceToHost));
for(int i=0;i<tempR;i++){
for(int j=0;j<tempC;j++){
out[off+i*C+j]=temp_out[i*tempC+j];
}
}
}
}
}
HANDLE_ERROR(hipEventRecord(end,0));
HANDLE_ERROR(hipEventSynchronize(end));
float time;
HANDLE_ERROR(hipEventElapsedTime(&time,start,end));
for(int i=0;i<R;i++){
for(int j=0;j<C;j++){
dstImage.at<uchar>(i,j)=out[i*C+j];
}
}
/*string name1=name+"classical";
string name2=name+"self";
string name3=name+"average";
string name4=name+"middle";*/
String tempname="";
int pos;
for(int i=0;i<name.length();i++){
if(name[i]=='.')
{
pos=i;
break;
}
}
if(choice==1){
tempname=name;
tempname.insert(pos,"_classical_GPU");
imshow("GPU",dstImage);
imwrite(tempname,dstImage);
}
else if(choice==2){
tempname=name;
tempname.insert(pos,"_adaptive_GPU");
imshow("GPU",dstImage);
imwrite(tempname,dstImage);
}
else if(choice==3){
tempname=name;
tempname.insert(pos,"_mean_GPU");
imshow("GPU",dstImage);
imwrite(tempname,dstImage);
}
else{
tempname=name;
tempname.insert(pos,"_median_GPU");
imshow("GPU",dstImage);
imwrite(tempname,dstImage);
}
// waitKey();
return time;
} | 92dbc76142581a71668525246445b9416c437bfb.cu | #include "test.h"
#include "book.h"
#include "cuda_runtime.h"
//mean filter GPU**************************************************************************************//
__global__ void ave_process(unsigned char* dev_original,unsigned char *dev_out,int R,int C){
int x=threadIdx.x;
int y=blockIdx.x;
int offset=x+y*blockDim.x;
while(offset<R*C){
int dev_y[NKsize*NKsize];
int gx=offset/C;
int gy=offset%C;
int sum=0;
int fx=gx-NKsize/2;
int fy=gy-NKsize/2;
for(int i=0;i<NKsize;i++){
for(int j=0;j<NKsize;j++){
if((fx+i)<0||(fy+j)<0||(fx+i)>=R||(fy+j)>=C){
dev_y[i*NKsize+j]=dev_original[offset];
}
else{
dev_y[i*NKsize+j]=dev_original[(fx+i)*C+(fy+j)];
}
sum+=dev_y[i*NKsize+j];
}
}
dev_out[offset]=sum/(NKsize*NKsize);
// dev_out[offset]=dev_original[offset];
offset+=blockDim.x*gridDim.x;
}
}
//median filter GPU**************************************************************************************//
__global__ void mid_process(unsigned char *dev_original,unsigned char *dev_out,int R,int C){
int x=threadIdx.x;
int y=blockIdx.x;
int offset=x+y*blockDim.x;
while(offset<R*C){
int dev_y[NKsize*NKsize];
int gx=offset/C;
int gy=offset%C;
int fx=gx-NKsize/2;
int fy=gy-NKsize/2;
for(int i=0;i<NKsize;i++){
for(int j=0;j<NKsize;j++){
if((fx+i)<0||(fy+j)<0||(fx+i)>=R||(fy+j)>=C){
dev_y[i*NKsize+j]=dev_original[offset];
}
else{
dev_y[i*NKsize+j]=dev_original[(fx+i)*C+(fy+j)];
}
}
}
int temp;
for(int i=0;i<NKsize*NKsize-1;i++){
for(int j=0;j<NKsize*NKsize-1-i;j++){
if(dev_y[j]>dev_y[j+1]){
temp=dev_y[j];
dev_y[j]=dev_y[j+1];
dev_y[j+1]=temp;
}
}
}
dev_out[offset]=dev_y[NKsize*NKsize/2];
offset+=blockDim.x*gridDim.x;
}
}
//classical kernel regression**************************************************************************************//
float Kcal(int x1,int y1,int x0,int y0) {
float distance=(x1-x0)*(x1-x0)+(y1-y0)*(y1-y0);
float h=1;
float t=distance/h;
float xishu=1/((2*pai));
// float zhishu=(-0.5)*((x1-x0)*(x1-x0)+(y1-y0)*(y1-y0));
float r=xishu*exp(-0.5*t*t);
return r;
}
void init_Wcal(float *W) {
int offset;
int x0=Ksize/2;
int y0=Ksize/2;
for(int i=0; i<Ksize*Ksize; i++) {
for(int j=0; j<Ksize*Ksize; j++) {
if(i==j) {
int fx=i/Ksize;
int fy=i%Ksize;
W[i*Ksize*Ksize+j]=Kcal(fx,fy,x0,y0);
} else {
W[i*Ksize*Ksize+j]=0;
}
}
}
}
//Matrix multiplication on the GPU
__device__ void Mult(float *c,float* a,float *b,int a_r,int a_c,int b_r,int b_c){
for(int i=0;i<a_r;i++){
for(int j=0;j<b_c;j++){
c[i*b_c+j]=0;
for(int k=0;k<a_c;k++){
c[i*b_c+j]+=a[i*a_c+k]*b[k*b_c+j];
}
}
}
}
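// Naive dense matrix multiply c = a*b, executed entirely within one thread on
// the small per-pixel matrices; no shared-memory tiling is used at these sizes.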
__global__ void cla_process(unsigned char *dev_original,unsigned char *dev_out,float *dev_mid,int R,int C){
int x=blockIdx.x;
int y=threadIdx.x;
int offset=y+x*blockDim.x;
while(offset<R*C){
float dev_y[Ksize*Ksize];
float dev_res[3*1];
int fx=offset/C-Ksize/2;
int fy=offset%C-Ksize/2;
for(int i=0;i<Ksize;i++){
for(int j=0;j<Ksize;j++){
if((fx+i)<0||(fy+j)<0||(fx+i)>=R||(fy+j)>=C)
dev_y[i*Ksize+j]=dev_original[offset];
else
dev_y[i*Ksize+j]=dev_original[(fx+i)*C+(fy+j)];
}
}
Mult(dev_res,dev_mid,dev_y,3,Ksize*Ksize,Ksize*Ksize,1);
float value=dev_res[0];
// value/=1.3;
int grayvalue=(int )value;
if(grayvalue>255){
grayvalue=255;
}
else if(grayvalue<0){
grayvalue=0;
}
dev_out[offset]=(unsigned char)grayvalue;
offset=offset+blockDim.x*gridDim.x;
}
}
//Adaptive kernel regression**************************************************************************************//
__device__ float Kself(int x1,int y1,int x0,int y0,float *C){
float h=4;
float distance=(x1-x0)*(x1-x0)*C[0]+(x1-x0)*(y1-y0)*C[2]+(x1-x0)*(y1-y0)*C[1]+(y1-y0)*(y1-y0)*C[3];
float xishu=sqrt(C[0]*C[3]-C[1]*C[2])/(2*pai*h*h);
float r=xishu*exp(-0.5*distance/(h*h));
return r;
}
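// Steering (adaptive) kernel weight: C is a 2x2 steering/covariance matrix
// flattened row-major into C[0..3], so the exponent is the quadratic form
// x'Cx and the weight is an anisotropic Gaussian that follows local structure.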
//Initialize the W weight matrix (Ksize^2 x Ksize^2)
__device__ void init_Wself(float* W,float *C){
int offset;
int x0=Ksize/2;
int y0=Ksize/2;
for(int i=0;i<Ksize*Ksize;i++){
for(int j=0;j<Ksize*Ksize;j++){
if(i==j){
int fx=i/Ksize;
int fy=i%Ksize;
W[i*Ksize*Ksize+j]=Kself(fx,fy,x0,y0,C);
}
else{
W[i*Ksize*Ksize+j]=0;
}
}
}
}
//Initialize the gradient matrix
__device__ void init_Gself(float *G,float *y){
int fx=Ksize/2;
int fy=Ksize/2;
int num=0;
for(int i=0;i<Ksize;i++){
for(int j=0;j<Ksize;j++){
if(i!=fx){
G[num*2]=(y[i*Ksize+j]-y[fx*Ksize+fy])/(i-fx);
}
else{
G[num*2]=0;
}
if(j!=fy){
G[num*2+1]=(y[i*Ksize+j]-y[fx*Ksize+fy])/(j-fy);
}
else{
G[num*2+1]=0;
}
/* if(i+1>=Ksize){
G[num*2]=0;
}
else{
G[num*2]=(y[(i+1)*Ksize+j]-y[i*Ksize+j]);
}
if(j+1>=Ksize){
G[num*2+1]=0;
}
else{
G[num*2+1]=y[i*Ksize+j+1]-y[i*Ksize+j];
}*/
num++;
}
}
}
__device__ void pppself(float a[],float e[],float s[],float v[],int m,int n) {
int i,j,p,q;
double d;
if(m>=n)
i=n;
else
i=m;
for(j=1; j<=i-1; j++) {
a[(j-1)*n+j-1]=s[j-1];
a[(j-1)*n+j]=e[j-1];
}
a[(i-1)*n+i-1]=s[i-1];
if(m<n)
a[(i-1)*n+i]=e[i-1];
for(i=1; i<=n-1; i++)
for(j=i+1; j<=n; j++) {
p=(i-1)*n+j-1;
q=(j-1)*n+i-1;
d=v[p];
v[p]=v[q];
v[q]=d;
}
return;
}
__device__ void sssself(float fg[2],float cs[2]) {
float r,d;
//if((fabs(fg[0])+fabs(fg[1]))==0.0)
if((fabs(fg[0])+fabs(fg[1]))<MIN_DOUBLE) {
cs[0]=1.0;
cs[1]=0.0;
d=0.0;
} else {
d=sqrt(fg[0]*fg[0]+fg[1]*fg[1]);
if(fabs(fg[0])>fabs(fg[1])) {
d=fabs(d);
if(fg[0]<0.0)
d=-d;
}
if(fabs(fg[1])>=fabs(fg[0])) {
d=fabs(d);
if(fg[1]<0.0)
d=-d;
}
cs[0]=fg[0]/d;
cs[1]=fg[1]/d;
}
r=1.0;
if(fabs(fg[0])>fabs(fg[1]))
r=cs[1];
else
//if(cs[0]!=0.0)
if(fabs(cs[0])>MIN_DOUBLE)
r=1.0/cs[0];
fg[0]=d;
fg[1]=r;
return;
}
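// pppself and sssself are helpers for the SVD routine below: sssself computes
// the cos/sin pair of a Givens rotation (much like BLAS drotg), and pppself
// writes the converged diagonal/superdiagonal back into a and transposes v.
// The code appears to be ported from a classic C-library SVD implementation.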
//Compute the singular value decomposition of the matrix
__device__ int dluavself(float a[],int m,int n,float u[],float v[],float eps,int ka){
int i,j,k,l,it,ll,kk,ix,iy,mm,nn,iz,ml,ks;
float d,dd,t,sm,sml,eml,sk,ek,b,c,shh,fg[2],cs[2];
float s[26];
float w[26];
float e[26];
for(i=1; i<=m; i++) {
ix=(i-1)*m+i-1;
u[ix]=0;
}
for(i=1; i<=n; i++) {
iy=(i-1)*n+i-1;
v[iy]=0;
}
it=MAX_ITERA;
k=n;
if(m-1<n)
k=m-1;
l=m;
if(n-2<m) l=n-2;
if(l<0) l=0;
ll=k;
if(l>k) ll=l;
if(ll>=1) {
for(kk=1; kk<=ll; kk++) {
if(kk<=k) {
d=0.0;
for(i=kk; i<=m; i++) {
ix=(i-1)*n+kk-1;
d=d+a[ix]*a[ix];
}
s[kk-1]=sqrt(d);
//if(s[kk-1]!=0.0)
if(fabs(s[kk-1])>MIN_DOUBLE) {
ix=(kk-1)*n+kk-1;
//if(a[ix]!=0.0)
if(fabs(a[ix])>MIN_DOUBLE) {
s[kk-1]=fabs(s[kk-1]);
if(a[ix]<0.0) s[kk-1]=-s[kk-1];
}
for(i=kk; i<=m; i++) {
iy=(i-1)*n+kk-1;
a[iy]=a[iy]/s[kk-1];
}
a[ix]=1.0+a[ix];
}
s[kk-1]=-s[kk-1];
}
if(n>=kk+1) {
for(j=kk+1; j<=n; j++) {
//if((kk<=k)&&(s[kk-1]!=0.0))
if((kk<=k)&&(fabs(s[kk-1])>MIN_DOUBLE)) {
d=0.0;
for(i=kk; i<=m; i++) {
ix=(i-1)*n+kk-1;
iy=(i-1)*n+j-1;
d=d+a[ix]*a[iy];
}
d=-d/a[(kk-1)*n+kk-1];
for(i=kk; i<=m; i++) {
ix=(i-1)*n+j-1;
iy=(i-1)*n+kk-1;
a[ix]=a[ix]+d*a[iy];
}
}
e[j-1]=a[(kk-1)*n+j-1];
}
}
if(kk<=k) {
for(i=kk; i<=m; i++) {
ix=(i-1)*m+kk-1;
iy=(i-1)*n+kk-1;
u[ix]=a[iy];
}
}
if(kk<=l) {
d=0.0;
for(i=kk+1; i<=n; i++)
d=d+e[i-1]*e[i-1];
e[kk-1]=sqrt(d);
//if(e[kk-1]!=0.0)
if(fabs(e[kk-1])>MIN_DOUBLE) {
//if(e[kk]!=0.0)
if(fabs(e[kk])>MIN_DOUBLE) {
e[kk-1]=fabs(e[kk-1]);
if(e[kk]<0.0)
e[kk-1]=-e[kk-1];
}
for(i=kk+1; i<=n; i++)
e[i-1]=e[i-1]/e[kk-1];
e[kk]=1.0+e[kk];
}
e[kk-1]=-e[kk-1];
//if((kk+1<=m)&&(e[kk-1]!=0.0))
if((kk+1<=m)&&(fabs(e[kk-1])>MIN_DOUBLE)) {
for(i=kk+1; i<=m; i++) w[i-1]=0.0;
for(j=kk+1; j<=n; j++)
for(i=kk+1; i<=m; i++)
w[i-1]=w[i-1]+e[j-1]*a[(i-1)*n+j-1];
for(j=kk+1; j<=n; j++)
for(i=kk+1; i<=m; i++) {
ix=(i-1)*n+j-1;
a[ix]=a[ix]-w[i-1]*e[j-1]/e[kk];
}
}
for(i=kk+1; i<=n; i++)
v[(i-1)*n+kk-1]=e[i-1];
}
}
}
mm=n;
if(m+1<n) mm=m+1;
if(k<n) s[k]=a[k*n+k];
if(m<mm) s[mm-1]=0.0;
if(l+1<mm) e[l]=a[l*n+mm-1];
e[mm-1]=0.0;
nn=m;
if(m>n) nn=n;
if(nn>=k+1) {
for(j=k+1; j<=nn; j++) {
for(i=1; i<=m; i++)
u[(i-1)*m+j-1]=0.0;
u[(j-1)*m+j-1]=1.0;
}
}
if(k>=1) { /////////////////////////////////
for(ll=1; ll<=k; ll++) {
kk=k-ll+1;
iz=(kk-1)*m+kk-1;
//if(s[kk-1]!=0.0)
if(fabs(s[kk-1])>MIN_DOUBLE) {
if(nn>=kk+1)
for(j=kk+1; j<=nn; j++) {
d=0.0;
for(i=kk; i<=m; i++) {
ix=(i-1)*m+kk-1;
iy=(i-1)*m+j-1;
d=d+u[ix]*u[iy]/u[iz];
}
d=-d;
for(i=kk; i<=m; i++) {
ix=(i-1)*m+j-1;
iy=(i-1)*m+kk-1;
u[ix]=u[ix]+d*u[iy];
}
}
for(i=kk; i<=m; i++) {
ix=(i-1)*m+kk-1;
u[ix]=-u[ix];
}
u[iz]=1.0+u[iz];
if(kk-1>=1)//////////////////////////////////////
for(i=1; i<=kk-1; i++)
u[(i-1)*m+kk-1]=0.0;
} else {
for(i=1; i<=m; i++)
u[(i-1)*m+kk-1]=0.0;
u[(kk-1)*m+kk-1]=1.0;
}
}
}
for(ll=1; ll<=n; ll++) {
kk=n-ll+1;
iz=kk*n+kk-1;
//if((kk<=l)&&(e[kk-1]!=0.0))/////////////////////////////
if((kk<=l)&&(fabs(e[kk-1])>MIN_DOUBLE)) {
for(j=kk+1; j<=n; j++) {
d=0.0;
for(i=kk+1; i<=n; i++) {
ix=(i-1)*n+kk-1;
iy=(i-1)*n+j-1;
d=d+v[ix]*v[iy]/v[iz];
}
d=-d;
for(i=kk+1; i<=n; i++) {
ix=(i-1)*n+j-1;
iy=(i-1)*n+kk-1;
v[ix]=v[ix]+d*v[iy];
}
}
}
for(i=1; i<=n; i++)
v[(i-1)*n+kk-1]=0.0;
v[iz-n]=1.0;
}
for(i=1; i<=m; i++)
for(j=1; j<=n; j++)
a[(i-1)*n+j-1]=0.0;
ml=mm;
it=MAX_ITERA;
while(1==1) { //////////////////////////////////
if(mm==0) {
pppself(a,e,s,v,m,n);
// free(s);free(e);free(w);
return l;
}
if(it==0) {
pppself(a,e,s,v,m,n);
// free(s);free(e);free(w);
return -1;
}
kk=mm-1;
//while((kk!=0)&&(fabs(e[kk-1])!=0.0))
while((kk!=0)&&(fabs(e[kk-1])>MIN_DOUBLE)) {
d=fabs(s[kk-1])+fabs(s[kk]);
dd=fabs(e[kk-1]);
if(dd>eps*d)
kk=kk-1;
else
e[kk-1]=0.0;
}
if(kk==mm-1) {
kk=kk+1;
if(s[kk-1]<0.0) {
s[kk-1]=-s[kk-1];
for(i=1; i<=n; i++) {
ix=(i-1)*n+kk-1;
v[ix]=-v[ix];
}
}
while((kk!=ml)&&(s[kk-1]<s[kk])) {
d=s[kk-1];
s[kk-1]=s[kk];
s[kk]=d;
if(kk<n)
for(i=1; i<=n; i++) {
ix=(i-1)*n+kk-1;
iy=(i-1)*n+kk;
d=v[ix];
v[ix]=v[iy];
v[iy]=d;
}
if(kk<m)
for(i=1; i<=m; i++) {
ix=(i-1)*m+kk-1;
iy=(i-1)*m+kk;
d=u[ix];
u[ix]=u[iy];
u[iy]=d;
}
kk=kk+1;
}
it=MAX_ITERA;
mm=mm-1;
} else {
ks=mm;
//while((ks>kk)&&(fabs(s[ks-1])!=0.0))
while((ks>kk)&&(fabs(s[ks-1])>MIN_DOUBLE)) {
d=0.0;
if(ks!=mm)
d=d+fabs(e[ks-1]);
if(ks!=kk+1) d=d+fabs(e[ks-2]);
dd=fabs(s[ks-1]);
if(dd>eps*d)
ks=ks-1;
else
s[ks-1]=0.0;
}
if(ks==kk) {
kk=kk+1;
d=fabs(s[mm-1]);
t=fabs(s[mm-2]);
if(t>d)
d=t;
t=fabs(e[mm-2]);
if(t>d)
d=t;
t=fabs(s[kk-1]);
if(t>d)
d=t;
t=fabs(e[kk-1]);
if(t>d)
d=t;
sm=s[mm-1]/d;
sml=s[mm-2]/d;
eml=e[mm-2]/d;
sk=s[kk-1]/d;
ek=e[kk-1]/d;
b=((sml+sm)*(sml-sm)+eml*eml)/2.0;
c=sm*eml;
c=c*c;
shh=0.0;
//if((b!=0.0)||(c!=0.0))
if((fabs(b)>MIN_DOUBLE)||(fabs(c)>MIN_DOUBLE)) {
shh=sqrt(b*b+c);
if(b<0.0)
shh=-shh;
shh=c/(b+shh);
}
fg[0]=(sk+sm)*(sk-sm)-shh;
fg[1]=sk*ek;
for(i=kk; i<=mm-1; i++) {
sssself(fg,cs);
if(i!=kk)
e[i-2]=fg[0];
fg[0]=cs[0]*s[i-1]+cs[1]*e[i-1];
e[i-1]=cs[0]*e[i-1]-cs[1]*s[i-1];
fg[1]=cs[1]*s[i];
s[i]=cs[0]*s[i];
//if((cs[0]!=1.0)||(cs[1]!=0.0))
if((fabs(cs[0]-1.0)>MIN_DOUBLE)||(fabs(cs[1])>MIN_DOUBLE))
for(j=1; j<=n; j++) {
ix=(j-1)*n+i-1;
iy=(j-1)*n+i;
d=cs[0]*v[ix]+cs[1]*v[iy];
v[iy]=-cs[1]*v[ix]+cs[0]*v[iy];
v[ix]=d;
}
sssself(fg,cs);
s[i-1]=fg[0];
fg[0]=cs[0]*e[i-1]+cs[1]*s[i];
s[i]=-cs[1]*e[i-1]+cs[0]*s[i];
fg[1]=cs[1]*e[i];
e[i]=cs[0]*e[i];
if(i<m)
//if((cs[0]!=1.0)||(cs[1]!=0.0))
if((fabs(cs[0]-1.0)>MIN_DOUBLE)||(fabs(cs[1])>MIN_DOUBLE))
for(j=1; j<=m; j++) {
ix=(j-1)*m+i-1;
iy=(j-1)*m+i;
d=cs[0]*u[ix]+cs[1]*u[iy];
u[iy]=-cs[1]*u[ix]+cs[0]*u[iy];
u[ix]=d;
}
}
e[mm-2]=fg[0];
it=it-1;
} else {
if(ks==mm) {
kk=kk+1;
fg[1]=e[mm-2];
e[mm-2]=0.0;
for(ll=kk; ll<=mm-1; ll++) {
i=mm+kk-ll-1;
fg[0]=s[i-1];
sssself(fg,cs);
s[i-1]=fg[0];
if(i!=kk) {
fg[1]=-cs[1]*e[i-2];
e[i-2]=cs[0]*e[i-2];
}
//if((cs[0]!=1.0)||(cs[1]!=0.0))
if((fabs(cs[0]-1.0)>MIN_DOUBLE)||(fabs(cs[1])>MIN_DOUBLE))
for(j=1; j<=n; j++) {
ix=(j-1)*n+i-1;
iy=(j-1)*n+mm-1;
d=cs[0]*v[ix]+cs[1]*v[iy];
v[iy]=-cs[1]*v[ix]+cs[0]*v[iy];
v[ix]=d;
}
}
} else {
kk=ks+1;
fg[1]=e[kk-2];
e[kk-2]=0.0;
for(i=kk; i<=mm; i++) {
fg[0]=s[i-1];
sssself(fg,cs);
s[i-1]=fg[0];
fg[1]=-cs[1]*e[i-1];
e[i-1]=cs[0]*e[i-1];
//if((cs[0]!=1.0)||(cs[1]!=0.0))
if((fabs(cs[0]-1.0)>MIN_DOUBLE)||(fabs(cs[1])>MIN_DOUBLE))
for(j=1; j<=m; j++) {
ix=(j-1)*m+i-1;
iy=(j-1)*m+kk-2;
d=cs[0]*u[ix]+cs[1]*u[iy];
u[iy]=-cs[1]*u[ix]+cs[0]*u[iy];
u[ix]=d;
}
}
}
}
}
}
// s, e, and w are fixed-size stack arrays, so there is nothing to free here
return l;
}
//Invert a 3x3 matrix on the GPU
__device__ void Inverse(float *a){
float A=a[0]*a[4]*a[8]+a[1]*a[5]*a[6]+a[2]*a[3]*a[7]-a[2]*a[4]*a[6]-a[1]*a[3]*a[8]-a[0]*a[5]*a[7];
float b[9];
b[0]=a[4]*a[8]-a[7]*a[5];
b[1]=-1*(a[1]*a[8]-a[7]*a[2]);
b[2]=a[1]*a[5]-a[2]*a[4];
b[3]=a[5]*a[6]-a[8]*a[3];
b[4]=-1*(a[2]*a[6]-a[8]*a[0]);
b[5]=a[2]*a[3]-a[0]*a[5];
b[6]=a[3]*a[7]-a[6]*a[4];
b[7]=-1*(a[0]*a[7]-a[6]*a[1]);
b[8]=a[0]*a[4]-a[1]*a[3];
for(int i=0; i<9; i++) {
b[i]=(float)b[i]/A;
a[i]=b[i];
}
}
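// 3x3 inverse via the adjugate divided by the determinant A. There is no
// singularity check, so a near-singular X'WX can produce inf/NaN; the calling
// kernels clamp the final pixel value to [0,255] instead.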
__device__ float computeself(float *X,float *X_t,float *W,float *y){
float WW[3*Ksize*Ksize];
float XX[3*3];
float YY[3*1];
float XXWW[3*Ksize*Ksize];
float XXy[3*1];
//the scratch buffers above are allocated with extra (redundant) space
Mult(WW,X_t,W,3,Ksize*Ksize,Ksize*Ksize,Ksize*Ksize);
Mult(XX,WW,X,3,Ksize*Ksize,Ksize*Ksize,3);
Inverse(XX);
Mult(XXWW,XX,WW,3,3,3,Ksize*Ksize);
Mult(XXy,XXWW,y,3,Ksize*Ksize,Ksize*Ksize,1);
return (XXy[0]);
}
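// computeself evaluates the weighted-least-squares estimate
// beta = (X'WX)^{-1} X'W y
// and returns its first component (the order-0 coefficient), which is the
// denoised value at the window center.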
__constant__ float XXX[3*Ksize*Ksize];
__constant__ float XXXT[3*Ksize*Ksize];
__global__ void self_process(unsigned char *dev_original,unsigned char* dev_out,float *dev_X_t,float *dev_X,int R,int C){
int x=threadIdx.x;
int y=blockIdx.x;
int offset=x+y*blockDim.x;
while(offset<R*C){
float dev_y[Ksize*Ksize];
float dev_W[Ksize*Ksize*Ksize*Ksize];
float G[Ksize*Ksize*2];
float U[Ksize*Ksize*Ksize*Ksize];
float V[2*2];
float eps=0.000001;
int fx=offset/C-Ksize/2;
int fy=offset%C-Ksize/2;
for(int i=0;i<Ksize;i++){
for(int j=0;j<Ksize;j++){
if((fx+i)<0||(fy+j)<0||(fx+i)>=R||(fy+j)>=C)
dev_y[i*Ksize+j]=dev_original[offset];
else
dev_y[i*Ksize+j]=dev_original[(fx+i)*C+(fy+j)];
}
}
init_Gself(G,dev_y);
int re=dluavself(G,Ksize*Ksize,2,U,V,eps,Ksize*Ksize+1);
float sita=atan(V[1]/V[3]);
float gama;
float deta;
float C[4];
float s1;
float s2;
s1=G[0];
s2=G[3];
gama=sqrt((s1*s2+1)/25);
deta=(s1+1)/(s2+1);
C[0]=gama*(deta*cos(sita)*cos(sita)+1/deta*sin(sita)*sin(sita));
C[1]=gama*(1/deta*sin(sita)*cos(sita)-deta*cos(sita)*sin(sita));
C[2]=C[1];
C[3]=gama*(deta*sin(sita)*sin(sita)+1/deta*cos(sita)*cos(sita));
init_Wself(dev_W,C);
float value=computeself(XXX,XXXT,dev_W,dev_y);
// value/=1.1;
if(value>255){
value=255;
}
else if(value<0){
value=0;
}
// value/=1.3;
dev_out[offset]=(unsigned char)value;
offset=offset+gridDim.x*blockDim.x;
}
}
//Main function**************************************************************************************//
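//choice selects the filter: 1 = classical kernel regression, 2 = adaptive
//kernel regression, 3 = mean filter, 4 = median filter; the result is written
//next to the input with a matching "_*_GPU" filename suffix.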
extern "C" float Test_GPU(string name,int choice){
int liR=512;
int liC=512;
cudaEvent_t start,end;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&end));
HANDLE_ERROR(cudaEventRecord(start,0));
unsigned char *original;
unsigned char *out;
unsigned char *dev_original;
unsigned char *dev_out;
unsigned char *temp_original;
unsigned char *temp_out;
temp_original=(unsigned char*)malloc(liR*liC*sizeof(unsigned char));
temp_out=(unsigned char*)malloc(liR*liC*sizeof(unsigned char));
float *X;
float *X_t;
float *W;
float *dev_mid;
float *XX;
float *WW;
float *Mid;
float *dev_X_t;
float *dev_X;
Mat srcImage=imread(name);
cvtColor(srcImage, srcImage, CV_BGR2GRAY);
if(srcImage.channels()!=1)
return srcImage.channels();
Mat dstImage=srcImage.clone();
int R= srcImage.rows;
int C= srcImage.cols;
original=(unsigned char*)malloc(R*C*sizeof(unsigned char));
for(int i=0;i<R;i++){
for(int j=0;j<C;j++){
original[i*C+j]=srcImage.at<uchar>(i,j);
}
}
out=(unsigned char*)malloc(R*C*sizeof(unsigned char));
X=(float*)malloc(3*Ksize*Ksize*sizeof(float));
X_t=(float*)malloc(3*Ksize*Ksize*sizeof(float));
W=(float*)malloc(Ksize*Ksize*Ksize*Ksize*sizeof(float));
XX=(float*)malloc(sizeof(float)*3*Ksize*Ksize);
WW=(float*)malloc(sizeof(float)*3*Ksize*Ksize);
Mid=(float*)malloc(sizeof(float)*3*Ksize*Ksize);
init_X(X);
init_X_t(X_t);
init_Wcal(W);
HANDLE_ERROR(cudaMalloc((void**)&dev_original,R*C*sizeof(unsigned char)));
HANDLE_ERROR(cudaMalloc((void**)&dev_out,R*C*sizeof(unsigned char)));
//for classical
HANDLE_ERROR(cudaMalloc((void**)&dev_mid,3*Ksize*Ksize*sizeof(float)));
mult(WW,X_t,W,3,Ksize*Ksize,Ksize*Ksize,Ksize*Ksize);
mult(XX,WW,X,3,Ksize*Ksize,Ksize*Ksize,3);
//for self
HANDLE_ERROR(cudaMalloc((void**)&dev_X,3*Ksize*Ksize*Ksize*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&dev_X_t,3*Ksize*Ksize*sizeof(float)));
HANDLE_ERROR(cudaMemcpy(dev_X,X,3*Ksize*Ksize*sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_X_t,X_t,3*Ksize*Ksize*sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpyToSymbol(XXX,X,3*Ksize*Ksize*sizeof(float)));
HANDLE_ERROR(cudaMemcpyToSymbol(XXXT,X_t,3*Ksize*Ksize*sizeof(float)));
if(inverse(XX)!=0)
mult(Mid,XX,WW,3,3,3,Ksize*Ksize);
HANDLE_ERROR(cudaMemcpy(dev_mid,Mid,3*Ksize*Ksize*sizeof(float),cudaMemcpyHostToDevice));
if(choice==3){
HANDLE_ERROR(cudaMemcpy(dev_original,original,R*C*sizeof(unsigned char),cudaMemcpyHostToDevice));
ave_process<<<DIM,DIM>>>(dev_original,dev_out,R,C);
HANDLE_ERROR(cudaMemcpy(out,dev_out,R*C*sizeof(unsigned char),cudaMemcpyDeviceToHost));
}
else if(choice==4){
HANDLE_ERROR(cudaMemcpy(dev_original,original,R*C*sizeof(unsigned char),cudaMemcpyHostToDevice));
mid_process<<<DIM,DIM>>>(dev_original,dev_out,R,C);
HANDLE_ERROR(cudaMemcpy(out,dev_out,R*C*sizeof(unsigned char),cudaMemcpyDeviceToHost));
}
else if(choice==1){
HANDLE_ERROR(cudaMemcpy(dev_original,original,R*C*sizeof(unsigned char),cudaMemcpyHostToDevice));
cla_process<<<DIM/2,DIM/2>>>(dev_original,dev_out,dev_mid,R,C);
HANDLE_ERROR(cudaMemcpy(out,dev_out,R*C*sizeof(unsigned char),cudaMemcpyDeviceToHost));
}
else{
int numR=(R+liR-1)/liR; //number of liR-row tiles needed to cover the image
int numC=(C+liC-1)/liC; //number of liC-column tiles
for(int i=0;i<numR;i++){
for(int j=0;j<numC;j++){
int off=i*liR*C+j*liC;
int tempR=liR;
int tempC=liC;
if(i==numR-1)
tempR=R-liR*i;
if(j==numC-1)
tempC=C-liC*j;
for(int i=0;i<tempR;i++){
for(int j=0;j<tempC;j++){
temp_original[i*tempC+j]=original[off+i*C+j];
}
}
HANDLE_ERROR(cudaMemcpy(dev_original,temp_original,tempR*tempC*sizeof(unsigned char),cudaMemcpyHostToDevice));
self_process<<<DIM/2,DIM/2>>>(dev_original,dev_out,dev_X_t,dev_X,tempR,tempC);
HANDLE_ERROR(cudaMemcpy(temp_out,dev_out,tempR*tempC*sizeof(unsigned char),cudaMemcpyDeviceToHost));
for(int i=0;i<tempR;i++){
for(int j=0;j<tempC;j++){
out[off+i*C+j]=temp_out[i*tempC+j];
}
}
}
}
}
HANDLE_ERROR(cudaEventRecord(end,0));
HANDLE_ERROR(cudaEventSynchronize(end));
float time;
HANDLE_ERROR(cudaEventElapsedTime(&time,start,end));
for(int i=0;i<R;i++){
for(int j=0;j<C;j++){
dstImage.at<uchar>(i,j)=out[i*C+j];
}
}
/*string name1=name+"classical";
string name2=name+"self";
string name3=name+"average";
string name4=name+"middle";*/
String tempname="";
int pos;
for(int i=0;i<name.length();i++){
if(name[i]=='.')
{
pos=i;
break;
}
}
if(choice==1){
tempname=name;
tempname.insert(pos,"_classical_GPU");
imshow("经典核回归GPU",dstImage);
imwrite(tempname,dstImage);
}
else if(choice==2){
tempname=name;
tempname.insert(pos,"_adaptive_GPU");
imshow("自适应核回归GPU",dstImage);
imwrite(tempname,dstImage);
}
else if(choice==3){
tempname=name;
tempname.insert(pos,"_mean_GPU");
imshow("均值滤波GPU",dstImage);
imwrite(tempname,dstImage);
}
else{
tempname=name;
tempname.insert(pos,"_median_GPU");
imshow("中值滤波GPU",dstImage);
imwrite(tempname,dstImage);
}
// waitKey();
return time;
} |
b04bded637e13207398ef52b99ce1967622a3ea5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <iostream>
#include "mpi.h"
#include "nvshmem.h"
#include "nvshmemx.h"
#include "time.cuh"
#undef CUDA_CHECK
#define CUDA_CHECK(stmt) \
do { \
hipError_t result = (stmt); \
if (hipSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n",\
__FILE__, __LINE__, hipGetErrorString(result));\
exit(-1); \
} \
} while (0)
#define MPI_CHECK(stmt) \
do { \
int result = (stmt); \
if (MPI_SUCCESS != result) { \
fprintf(stderr, "[%s:%d] MPI failed with error %d \n",\
__FILE__, __LINE__, result); \
exit(-1); \
} \
} while (0)
#define TID (threadIdx.x+blockIdx.x*blockDim.x)
#define WARPID ((threadIdx.x+blockIdx.x*blockDim.x)>>5)
__global__ void long_band_block(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = blockIdx.x; i<num_messages; i+=gridDim.x)
nvshmemx_longlong_put_block(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void long_band_warp(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = (TID>>5); i<num_messages; i+=((blockDim.x*gridDim.x)>>5))
nvshmemx_longlong_put_warp(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void long_band_thread(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = TID; i<num_messages; i+=blockDim.x*gridDim.x)
nvshmem_longlong_put(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
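// The three kernels above issue the same num_messages puts at different
// granularities: long_band_block assigns one message per thread block
// (nvshmemx_longlong_put_block), long_band_warp one per warp
// (nvshmemx_longlong_put_warp), and long_band_thread one per thread
// (nvshmem_longlong_put). main() times each variant twice: once with the
// occupancy-suggested launch shape, and once with 32x more blocks of 32x
// fewer threads.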
int main (int c, char *v[])
{
int rank, nranks;
MPI_Comm mpi_comm;
nvshmemx_init_attr_t attr;
int mype, npes;
MPI_CHECK(MPI_Init(&c, &v));
MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &nranks));
mpi_comm = MPI_COMM_WORLD;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr (NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
//application picks the device each PE will use
int deviceCount = 0;
hipGetDeviceCount(&deviceCount);
printf("[%d] has %d GPUs, setDevice on GPU %d\n", mype, deviceCount, mype%deviceCount);
CUDA_CHECK(hipSetDevice(mype%deviceCount));
int bytes = 1<<30;
char * remote_buffer = (char *)nvshmem_malloc(sizeof(char)*bytes);
char * local_buffer;
local_buffer = (char *)nvshmem_malloc(sizeof(char)*bytes);
GpuTimer timer;
float totaltime = 0.0;
int message_bytes = 1024;
int num_messages = bytes/message_bytes;
hipStream_t *streams;
streams = (hipStream_t *)malloc(sizeof(hipStream_t)*(npes-1));
for(int i=0; i<npes-1; i++)
hipStreamCreateWithFlags(streams+i, hipStreamNonBlocking);
int numBlock = 160;
int numThread = 1024;
int num_rounds = 20;
CUDA_CHECK(hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)long_band_block));
nvshmem_barrier_all();
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_block using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
hipLaunchKernelGGL(( long_band_block), dim3(numBlock), dim3(numThread), 0, streams[0], num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
hipDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
totaltime = 0.0;
numBlock = numBlock*32;
numThread = numThread/32;
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_block using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
hipLaunchKernelGGL(( long_band_block), dim3(numBlock), dim3(numThread), 0, streams[0], num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
hipDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
CUDA_CHECK(hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)long_band_warp));
totaltime = 0.0;
nvshmem_barrier_all();
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_warp using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
hipLaunchKernelGGL(( long_band_warp), dim3(numBlock), dim3(numThread), 0, streams[0], num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
hipDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
totaltime = 0.0;
numBlock = numBlock*32;
numThread = numThread/32;
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_warp using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
hipLaunchKernelGGL(( long_band_warp), dim3(numBlock), dim3(numThread), 0, streams[0], num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
hipDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
CUDA_CHECK(hipOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)long_band_thread));
totaltime = 0.0;
nvshmem_barrier_all();
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_thread using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
hipLaunchKernelGGL(( long_band_thread), dim3(numBlock), dim3(numThread), 0, streams[0], num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
hipDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
totaltime = 0.0;
numBlock = numBlock*32;
numThread = numThread/32;
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_thread using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
hipLaunchKernelGGL(( long_band_thread), dim3(numBlock), dim3(numThread), 0, streams[0], num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
hipDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
nvshmem_barrier_all();
printf("[%d of %d] run complete \n", mype, npes);
nvshmem_free(remote_buffer);
nvshmem_free(local_buffer);
nvshmem_finalize();
MPI_CHECK(MPI_Finalize());
return 0;
}
| b04bded637e13207398ef52b99ce1967622a3ea5.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <iostream>
#include "mpi.h"
#include "nvshmem.h"
#include "nvshmemx.h"
#include "time.cuh"
#undef CUDA_CHECK
#define CUDA_CHECK(stmt) \
do { \
cudaError_t result = (stmt); \
if (cudaSuccess != result) { \
fprintf(stderr, "[%s:%d] cuda failed with %s \n",\
__FILE__, __LINE__, cudaGetErrorString(result));\
exit(-1); \
} \
} while (0)
#define MPI_CHECK(stmt) \
do { \
int result = (stmt); \
if (MPI_SUCCESS != result) { \
fprintf(stderr, "[%s:%d] MPI failed with error %d \n",\
__FILE__, __LINE__, result); \
exit(-1); \
} \
} while (0)
#define TID (threadIdx.x+blockIdx.x*blockDim.x)
#define WARPID ((threadIdx.x+blockIdx.x*blockDim.x)>>5)
__global__ void long_band_block(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = blockIdx.x; i<num_messages; i+=gridDim.x)
nvshmemx_longlong_put_block(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void long_band_warp(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = (TID>>5); i<num_messages; i+=((blockDim.x*gridDim.x)>>5))
nvshmemx_longlong_put_warp(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
__global__ void long_band_thread(int num_messages, int message_size, long long *remote_buffer, long long *local_buffer, int remote_pe)
{
for(int i = TID; i<num_messages; i+=blockDim.x*gridDim.x)
nvshmem_longlong_put(remote_buffer+message_size*i, local_buffer+message_size*i, message_size, remote_pe);
}
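// The three kernels above issue the same num_messages puts at different
// granularities: long_band_block assigns one message per thread block
// (nvshmemx_longlong_put_block), long_band_warp one per warp
// (nvshmemx_longlong_put_warp), and long_band_thread one per thread
// (nvshmem_longlong_put). main() times each variant twice: once with the
// occupancy-suggested launch shape, and once with 32x more blocks of 32x
// fewer threads.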
int main (int c, char *v[])
{
int rank, nranks;
MPI_Comm mpi_comm;
nvshmemx_init_attr_t attr;
int mype, npes;
MPI_CHECK(MPI_Init(&c, &v));
MPI_CHECK(MPI_Comm_rank(MPI_COMM_WORLD, &rank));
MPI_CHECK(MPI_Comm_size(MPI_COMM_WORLD, &nranks));
mpi_comm = MPI_COMM_WORLD;
attr.mpi_comm = &mpi_comm;
nvshmemx_init_attr (NVSHMEMX_INIT_WITH_MPI_COMM, &attr);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
//application picks the device each PE will use
int deviceCount = 0;
cudaGetDeviceCount(&deviceCount);
printf("[%d] has %d GPUs, setDevice on GPU %d\n", mype, deviceCount, mype%deviceCount);
CUDA_CHECK(cudaSetDevice(mype%deviceCount));
int bytes = 1<<30;
char * remote_buffer = (char *)nvshmem_malloc(sizeof(char)*bytes);
char * local_buffer;
local_buffer = (char *)nvshmem_malloc(sizeof(char)*bytes);
GpuTimer timer;
float totaltime = 0.0;
int message_bytes = 1024;
int num_messages = bytes/message_bytes;
cudaStream_t *streams;
streams = (cudaStream_t *)malloc(sizeof(cudaStream_t)*(npes-1));
for(int i=0; i<npes-1; i++)
cudaStreamCreateWithFlags(streams+i, cudaStreamNonBlocking);
int numBlock = 160;
int numThread = 1024;
int num_rounds = 20;
CUDA_CHECK(cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)long_band_block));
nvshmem_barrier_all();
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_block using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
long_band_block<<<numBlock, numThread, 0, streams[0]>>>(num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
cudaDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
totaltime = 0.0;
numBlock = numBlock*32;
numThread = numThread/32;
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_block using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
long_band_block<<<numBlock, numThread, 0, streams[0]>>>(num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
cudaDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
CUDA_CHECK(cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)long_band_warp));
totaltime = 0.0;
nvshmem_barrier_all();
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_warp using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
long_band_warp<<<numBlock, numThread, 0, streams[0]>>>(num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
cudaDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
totaltime = 0.0;
numBlock = numBlock*32;
numThread = numThread/32;
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_warp using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
long_band_warp<<<numBlock, numThread, 0, streams[0]>>>(num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
cudaDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
CUDA_CHECK(cudaOccupancyMaxPotentialBlockSize(&numBlock, &numThread, (void *)long_band_thread));
totaltime = 0.0;
nvshmem_barrier_all();
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_thread using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
long_band_thread<<<numBlock, numThread, 0, streams[0]>>>(num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
cudaDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
totaltime = 0.0;
numBlock = numBlock*32;
numThread = numThread/32;
std::cout << mype << " send "<< bytes << " bytes to "<< npes-1 << " GPUs with message size(bytes) "<< message_bytes << " using nvshmem_longlong_put_thread using threads: "<< numBlock << "x"<< numThread << std::endl;
nvshmem_barrier_all();
for(int round = 0; round < num_rounds; round++)
{
int remote_pe = (mype+1)%npes;
for(int j=0; j<npes-1; j++)
{
timer.Start();
long_band_thread<<<numBlock, numThread, 0, streams[0]>>>(num_messages, message_bytes/sizeof(long long), (long long *)remote_buffer, (long long *)local_buffer, remote_pe);
remote_pe = (remote_pe+1) % npes;
}
cudaDeviceSynchronize();
timer.Stop();
totaltime = totaltime + timer.ElapsedMillis();
}
nvshmem_barrier_all();
totaltime = totaltime/num_rounds;
std::cout << "PE "<<mype << " average time: " << totaltime << " bandwidth: "<<(bytes*(npes-1)/(totaltime/1000)/(1024*1024*1024))<<" GB/s"<<std::endl;
nvshmem_barrier_all();
if(mype == 0)
std::cout << "-------------------------------\n";
nvshmem_barrier_all();
nvshmem_barrier_all();
printf("[%d of %d] run complete \n", mype, npes);
nvshmem_free(remote_buffer);
nvshmem_free(local_buffer);
nvshmem_finalize();
MPI_CHECK(MPI_Finalize());
return 0;
}
|
02cbc556038e3ce2fc301998520162a651be1a1a.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
//KC_FP_TYPE can be assumed to mean "double", but originally
//this definition could also work with "float" for faster speed.
//float compatability is no longer supported in this function.
#include "kcArrayFunctions.h"
#define MAX_P 1e25
#define MIN_P 1e-25
__device__ KC_FP_TYPE positiveBound(KC_FP_TYPE a) {
//return a;
if(isinf(a))
return MAX_P;
else
return fmin(fmax(a,MIN_P),MAX_P);
}
__device__ KC_FP_TYPE h(KC_FP_TYPE z, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh) {
return log1p(exp(z*gamma))*exp(sh)*dt;
}
__device__ KC_FP_TYPE hinv(KC_FP_TYPE y, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh) {
return (log(exp(y/dt)-1.0)-sh)/gamma;
}
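// h() maps the latent state to a firing rate:
// rate = log(1+exp(gamma*z)) * exp(sh) * dt,
// a soft-rectification scaled by the spike-history term sh and bin width dt;
// hinv() is intended as its inverse in z.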
//one thread per particle <<< nTrials,nParticles >>>
__global__ void kcMoveParticles(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * b, int * betaIdxVector, KC_FP_TYPE l_0, KC_FP_TYPE g, KC_FP_TYPE w, KC_FP_TYPE dt, KC_FP_TYPE * randN, KC_FP_TYPE sigMult, KC_FP_TYPE * log_li, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * ncdf, KC_FP_TYPE * posc, int * trIdx, int NT, int TT, int numParticles, int t) {
int threadNum = blockIdx.x*blockDim.x + threadIdx.x;
int tr_num = (int)threadNum / (int)numParticles;
int p_num = threadNum % numParticles;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
KC_FP_TYPE cb = b[betaIdxVector[row]];
KC_FP_TYPE sw = sqrt(w);
KC_FP_TYPE mup = (t==0)?(l_0):(pos[idx-1]+cb);
KC_FP_TYPE mu = mup;
KC_FP_TYPE sig2 = sigMult*w;
KC_FP_TYPE sig = sqrt(sig2);
KC_FP_TYPE maxI = fmin(1.0-1e-20, fmax( normcdf((1.0-mu)/sig),1e-20 ));
pos[idx] = fmin(1.0-1e-20, normcdfinv(maxI*randN[pidx])*sig + mu);
posc[pidx] = pos[idx];
KC_FP_TYPE dpos = pos[idx]-mu;
KC_FP_TYPE log_pi_k = -log(maxI)-0.5*log(2.0*M_PI*sig2) - 0.5/sig2*(dpos*dpos);
//to be stored for each particle: ncdf, lw, lw2
ncdf[idx] = normcdf((1-mup)/sw);
KC_FP_TYPE dposp = pos[idx]-mup;
KC_FP_TYPE log_p = -0*log(maxI) -0.5*log(2*M_PI*w)- 0.5/w*(dposp*dposp);
log_li[pidx] = -h(pos[idx],g,dt,spe[row])+y[row]*(log(fmax(h(pos[idx],g,1.0,spe[row]),1e-30))+log(dt))-lgamma(y[row]+1);
KC_FP_TYPE pw = (t==0)?(log(1/(KC_FP_TYPE)numParticles) ):( log(fmax(wt[idx-1], 1e-30)) );
lw[pidx] = exp(pw+log_p+log_li[pidx]-log_pi_k);
lw2[pidx] = exp(pw+log_p -log_pi_k);
//safety checks for numerical errors
if(isnan(lw[pidx]) || isinf(lw[pidx]) || isnan(pos[idx]) || isinf(pos[idx]) || isnan(lw2[pidx]) || isinf(lw2[pidx])) {
lw[pidx] = 0;
lw2[pidx] = 0;
pos[idx] = mup;
posc[pidx] = mup;
}
}
}
}
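// Proposal used above: each particle takes a Gaussian step with inflated
// variance sigMult*w, truncated below the bound at 1 by inverse-CDF sampling
// (normcdfinv(maxI*u)*sig + mu). lw accumulates the unnormalized importance
// weight (previous weight * transition density * likelihood / proposal
// density); lw2 is the same quantity without the likelihood term.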
//one thread per trial <<< nTrials,1 >>>
__global__ void kcNormalizeWeights(KC_FP_TYPE * y, KC_FP_TYPE * wt, KC_FP_TYPE * wt_p, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * nEff, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
//sum up and normalize weights
KC_FP_TYPE weightSum = 0;
KC_FP_TYPE weightSum2 = 0;
for(int p_num = 0; p_num < numParticles; p_num++) {
int pidx = tr_num*numParticles+p_num;
weightSum += lw[pidx];
weightSum2 += lw2[pidx];
}
KC_FP_TYPE n_eff_den = 0;
weightSum = fmax(weightSum,1e-20);
weightSum2 = fmax(weightSum2,1e-20);
for(int p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt[idx] = lw[pidx] /weightSum;
wt_p[pidx] = lw2[pidx]/weightSum2;
n_eff_den += wt[idx]*wt[idx];
cumsum[pidx] = (p_num>0)?(cumsum[pidx-1]+wt[idx]):(wt[idx]);//for resampling
}
nEff[tr_num] = 1/n_eff_den;
}
}
}
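// nEff is the standard effective-sample-size estimate 1/sum(w_i^2); when it
// falls below minEffParticles, the resampling kernel below redraws particles
// from the per-trial CDF accumulated in cumsum.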
//initial calculation - probability of each spike count coming from a rate at the bound
__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < TT) {
lg[idx] = exp( -h(1,g, dt,spe[idx]) + y[idx]*log(fmax(h(1,g,dt,spe[idx]),1e-30)) - lgamma(y[idx]+1));
}
}
//one thread per particle <<< nTrials,nParticles >>>
// if particles look bad, resamples them from the distribution before the next step
__global__ void kcResampleParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * posc, KC_FP_TYPE * wt, KC_FP_TYPE * log_li, KC_FP_TYPE * wt_p, int minEffParticles, KC_FP_TYPE * cumsum, KC_FP_TYPE * nEff, KC_FP_TYPE * randU, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * ncdf, int * trIdx, int NT, int TT, int numParticles, int t) {
int threadNum = blockIdx.x*blockDim.x + threadIdx.x;
int tr_num = (int)threadNum / (int)numParticles;
int p_num = threadNum % numParticles;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int pidx = tr_num*numParticles+p_num;
int row = trIdx[tr_num] + t;
int idx = TT*p_num + row;
int pidx_new = pidx;
if(nEff[tr_num] < minEffParticles) {
int p_num_new;
for(p_num_new = 0; p_num_new < numParticles-1 && randU[pidx] > cumsum[numParticles*tr_num+p_num_new]; p_num_new++) {
//everything taken care of in loop statement
}
pidx_new = tr_num*numParticles+p_num_new;
wt[idx] = 1.0/(KC_FP_TYPE)numParticles; //weights are now uniform again
pos[idx] = posc[pidx_new];
}
KC_FP_TYPE wt_old = (t==0)?(1.0/(KC_FP_TYPE)numParticles):(wt[idx-1]);
p_cet_0[pidx] = (1.0-ncdf[idx])*wt_old;
p_cgt_0a[pidx] = exp(log_li[pidx])*wt_p[pidx]; //or pidx_new?
p_cgt_0b[pidx] = ncdf[idx]*wt_old;
}
}
}
//one thread per trial <<< nTrials,1 >>>
//move bound crossing probabilities forward in time
__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
KC_FP_TYPE p_cet_s = 0;
KC_FP_TYPE p_cgt_sa = 0;
KC_FP_TYPE p_cgt_sb = 0;
for(int p_num = 0; p_num < numParticles; p_num++) {
int pidx = tr_num*numParticles+p_num;
//int idx = TT*p_num + row;
p_cet_s += p_cet_0[pidx];
p_cgt_sa += p_cgt_0a[pidx];
p_cgt_sb += p_cgt_0b[pidx];
//completes the part of the resampler that must run after sampling so the parallelization stays correct; this is only used again if this is the last timestep in the trial
if(nEff[tr_num] < minEffParticles && t-1==trLength) {
cumsum[pidx] = 1/(KC_FP_TYPE)numParticles*(1+p_num);
}
}
KC_FP_TYPE p_clte_old = ((t==0)?(0):(p_clte[row-1]));
KC_FP_TYPE p_cgt_old = ((t==0)?(1):(p_cgt[row-1]));
KC_FP_TYPE p_clt_1 = lg[row]*p_clte_old;
KC_FP_TYPE p_cet_1 = lg[row]*(1.0-p_clte_old)*p_cet_s;
KC_FP_TYPE p_cgt_1 = (1.0-p_clte_old)*p_cgt_sa*p_cgt_sb;
p_cet[row] = p_cet_1/(p_clt_1+p_cet_1+p_cgt_1);
p_clte[row] = (p_cet_1+p_clt_1)/(p_clt_1+p_cet_1+p_cgt_1); //this is a little redundant, but I think it is convenient later?
p_clt[row] = p_clt_1/(p_clt_1+p_cet_1+p_cgt_1);
p_cgt[row] = p_cgt_1/(p_clt_1+p_cet_1+p_cgt_1);
p_cpr[row] = p_cgt_old*p_cet_s; //compare this index in MATLAB code
}
}
}
//Finally do that backwards sampling, <<< NT, 1 >>>
__global__ void kcBackwardsSample(KC_FP_TYPE * sample, int * crossingTimes, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * ncdf, KC_FP_TYPE * b, int * betaIdx, KC_FP_TYPE l_0, KC_FP_TYPE w, KC_FP_TYPE g, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_clte, KC_FP_TYPE * randUp, KC_FP_TYPE * randUb, KC_FP_TYPE * wt_p, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
int row = trIdx[tr_num] + t;
if(t == trLength-1) {
//if t=end of trial, start off the backwards sampling
crossingTimes[tr_num] = trLength;
//decide whether end trial has hit boundary
if(randUb[tr_num] < p_clte[row]) {
sample[row] = 1;
crossingTimes[tr_num] = t;
}
//else select a particle to be end of trial (cumsum holds the CDF of the distribution over particles)
else {
int p_num;
for(p_num = 0; p_num < numParticles-1 && randUp[tr_num] > cumsum[numParticles*tr_num+p_num]; p_num++) {
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
else if(t < trLength-1 && t >= 0) {
//else, propagate backwards
//if previous sample had hit threshold
if(sample[row+1] >= 1) {
//if boundary already reached
if(randUb[tr_num] < p_clte[row]/(p_cpr[row+1] + p_clte[row])) {
crossingTimes[tr_num] = t;
sample[row] = 1;
}
//gets pre-crossing particle
else {
KC_FP_TYPE wtSum = 0;
int p_num;
for(p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt_p[pidx] = wt[idx]*fmax(1.0-ncdf[idx+1],1e-25);
wtSum += wt_p[pidx];
}
wtSum = fmax(wtSum,1e-30);
KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum;
for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) {
int pidx = tr_num*numParticles+p_num+1;
csum += wt_p[pidx]/wtSum;
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
//else, samples a particle
else {
KC_FP_TYPE wtSum = 0;
int p_num;
for(p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt_p[pidx] = wt[idx]*exp(-0.5/w*pow( sample[row+1] - (pos[idx] + b[betaIdx[row]]),2 ));
wtSum += wt_p[pidx];
}
wtSum = fmax(wtSum,1e-30);
KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum;
for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) {
int pidx = tr_num*numParticles+p_num+1;
csum += wt_p[pidx]/wtSum;
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
}
}
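// Backward pass: at the last bin the path is declared bound-hit with
// probability p_clte, otherwise a particle is drawn from the cumsum CDF.
// Stepping backwards, if the later sample sits at the bound the sampler either
// moves the crossing time earlier (p_clte vs. p_cpr odds) or picks a
// pre-crossing particle; otherwise it picks a particle weighted by its filter
// weight times the transition density to the sample at t+1.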
/*
Performs a forward sweep of the path after backwards sampling
Draws from prior for steps post-threshold crossing (for conjugate sampling of parameters)
Calculates some statistics for later parameter sampling
trial number given by CUDA thread
*/
__global__ void kcForwardFinalPass( KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * randUni, const KC_FP_TYPE* b, const int * betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx,const int NT, KC_FP_TYPE * beta_sum) {
int tr_num = blockIdx.x*blockDim.x+threadIdx.x;
if(tr_num < NT) {
int t_0 = trIdx[tr_num];
beta_sum[tr_num] = 0;
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
KC_FP_TYPE cb = b[betaIndVec[t_0]];
for(int t = 0; t < trLength; t++) {
if(t == crossingTimes[tr_num]) {
//samples the first value of lambda to cross the bound (truncated normal, > 1)
KC_FP_TYPE mu = (t > 0)?(lambda[t_0 + t-1]+cb):l_0;
KC_FP_TYPE minS = normcdf((1-mu)/sqrt(w));
if(minS >= 1.0-1e-5) {
lambda[t_0 + t] = 1;
}
else {
lambda[t_0 + t] = mu+sqrt(w)*normcdfinv( minS + (1-minS)*randUni[t_0+t]);
}
}
else if(t > crossingTimes[tr_num]) {
lambda[t_0 + t] = lambda[t_0 + t - 1] + cb + KC_SQRT(w)*normcdfinv( randUni[t_0+t]);
}
beta_sum[tr_num] += (t>0 && t <= crossingTimes[tr_num])?(lambda[t_0 + t] - lambda[t_0 + t-1]):0; //only include lambdas up until first threshold crossing to look at drift rates
}
}
}
//single thread kernel to assemble stats of the ramps across trials for sampling beta,l_0
__global__ void kcAssembleSamplingStatistics(KC_FP_TYPE * sigMat, KC_FP_TYPE * muVec, const KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * beta_sum,const int*betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx, const int NT, const int numBetas) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx == 0) {
for(int trNum = 0; trNum < NT; trNum++) {
int t_0 = trIdx[trNum];
int cb = betaIndVec[t_0];
int trLength = trIdx[trNum+1] - trIdx[trNum];
sigMat[(cb)*(numBetas+1) + cb] += fmin(1.0*crossingTimes[trNum],trLength-1.0)/w;
sigMat[(numBetas)*(numBetas+1) + numBetas] += 1.0/w;
muVec[cb] += beta_sum[trNum]/w;
muVec[numBetas] += lambda[t_0]/w;
}
}
}
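// Single-thread kernel: accumulates the data-dependent entries (diagonal
// precision terms in sigMat and the mean terms in muVec) of the Gaussian from
// which the betas and l_0 are presumably sampled jointly on the host.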
//Samples a single set of latent paths from the ramping model for a set of trials given fixed parameters
//args
// 0 = new lambda (output, should be pre-allocated on GPU, same size as y)
// 1 = new auxiliary variable for threshold crossing (output, should be pre-allocated on GPU, vector of length number of trials)
// 2 = y (observations)
// 3 = trIdx (array that accesses the beta value used at each timepoint, y being indexed at 0. Includes final value that should be length of y)
// 4 = betaIdxVector (array that gives coherence used at each bins of y. i.e., accesses the beta value used at each timepoint. values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 5 = betas (the beta values)
// 6 = w (variance of diffusion process)
// 7 = l_0 (starting lambda value)
// 8 = g (absorbing boundary effective height)
// 9 = dt (bin/timestep size)
// 10 = numParticles
// 11 = minEffParticles (how many effective particles per trial to keep around)
// 12 = sigMult (used for particle proposals, proposal variance is sigMult*w)
// 13 = maxTrialLength
// 14 = beta/l_0 sampling vec param c (uses this as output for sampling betas, l_0)
// 15 = beta/l_0 sampling vec param p uses this as output for sampling betas, l_0)
// 16 = spike history effect
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
hipError_t ce;
hiprandStatus_t cre;
/*ce = hipSetDevice(KC_GPU_DEVICE);
if(ce != hipSuccess) {
mexPrintf("Error initializing device (kcParticleFilterProp.cu) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}*/
//init data
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * lambdaTarget = kcGetArrayData(prhs[0]);
int * auxiliaryTarget = kcGetArrayDataInt(prhs[1]);
KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT);
int * trIdx = kcGetArrayDataInt(prhs[3]);
unsigned int NT = kcGetArrayNumEl(prhs[3])-1;
int * betaIdxVector = kcGetArrayDataInt(prhs[4]);
KC_FP_TYPE * b = mxGetPr(prhs[5]);
int numBetas = mxGetNumberOfElements(prhs[5]);
KC_FP_TYPE * b_gpu;
ce = hipMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
if(ce != hipSuccess) {
mexPrintf("Error allocating space for betas on GPU - first allocation in function (particle filter) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
ce = hipMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,hipMemcpyHostToDevice);
if(ce != hipSuccess) {
mexPrintf("Error moving betas to GPU (particle filter) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
KC_FP_TYPE w = mxGetScalar(prhs[6]);
KC_FP_TYPE l_0 = mxGetScalar(prhs[7]);
KC_FP_TYPE g = mxGetScalar(prhs[8]);
KC_FP_TYPE dt = mxGetScalar(prhs[9]);
int numParticles = mxGetScalar(prhs[10]);
int minEffParticles = mxGetScalar(prhs[11]);
int sigMult = mxGetScalar(prhs[12]);
int maxTrialLength = mxGetScalar(prhs[13]);
//load spike history effect
KC_FP_TYPE * spe = kcGetArrayData(prhs[16],TT);
//particle weights/probabilities of hitting the bound
KC_FP_TYPE * p_clte;
KC_FP_TYPE * p_cet;
KC_FP_TYPE * p_cgt;
KC_FP_TYPE * p_clt;
KC_FP_TYPE * p_cpr;
checkCudaErrors(hipMalloc((void**)&p_clte, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cet, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cgt, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_clt, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cpr, TT*sizeof(KC_FP_TYPE)));
KC_FP_TYPE * wt;
KC_FP_TYPE * wt_p;
KC_FP_TYPE * pos;//particle positions
checkCudaErrors(hipMalloc((void**)&wt, (TT)*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&wt_p, (NT)*numParticles*sizeof(KC_FP_TYPE)));
ce = hipMalloc((void**)&pos, (TT)*numParticles*sizeof(KC_FP_TYPE));
if(ce != hipSuccess) {
mexPrintf("Error allocating pos ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
KC_FP_TYPE * log_li;
KC_FP_TYPE * posc; //for resampling
KC_FP_TYPE * lw; //unnormalized weights
KC_FP_TYPE * lw2;
KC_FP_TYPE * ncdf;
KC_FP_TYPE * p_cet_0;
KC_FP_TYPE * p_cgt_0a;
KC_FP_TYPE * p_cgt_0b;
KC_FP_TYPE * lg; //log p(y|at boundary)
KC_FP_TYPE * cumsum;
KC_FP_TYPE * beta_sum;
checkCudaErrors(hipMalloc((void**)&log_li, NT*numParticles*sizeof(KC_FP_TYPE)));
//checkCudaErrors(hipMalloc((void**)&log_lic, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&posc, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&lw, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&lw2, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&ncdf, TT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cet_0, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cgt_0a, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&p_cgt_0b, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&cumsum, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&beta_sum, NT*sizeof(KC_FP_TYPE)));
checkCudaErrors(hipMalloc((void**)&lg, TT*sizeof(KC_FP_TYPE)));
KC_FP_TYPE * nEff;
checkCudaErrors(hipMalloc((void**)&nEff, NT*sizeof(KC_FP_TYPE)));
int randSize = (NT*numParticles) + ((NT*numParticles)%2==0?0:1);
int randSizeS = (NT) + (NT%2==0?0:1);
int randSizeT = (TT) + (TT%2==0?0:1);
KC_FP_TYPE * randN;
KC_FP_TYPE * randNs;
KC_FP_TYPE * randTs;
ce = hipMalloc((void**)&randN, randSize *sizeof(KC_FP_TYPE));
if(ce != hipSuccess) {
mexPrintf("Error allocating randN ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
ce = hipMalloc((void**)&randNs, randSizeS*sizeof(KC_FP_TYPE));
if(ce != hipSuccess) {
mexPrintf("Error allocating randNs ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
ce = hipMalloc((void**)&randTs, randSizeT*sizeof(KC_FP_TYPE));
if(ce != hipSuccess) {
mexPrintf("Error allocating randTs ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
//setup the random number generator
hiprandGenerator_t curandGen = 0;
hiprandStatus_t randStatus;
randStatus = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT);
if(randStatus != HIPRAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error initializing random number generator (%d).\n",(int)hiprandStatus_t);
mexErrMsgTxt(buffer);
}
struct timeval now;
gettimeofday(&now,NULL);
unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
randStatus = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
//randStatus = hiprandSetPseudoRandomGeneratorSeed(curandGen, (unsigned int)time(NULL));
if(randStatus != HIPRAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error random number seed (%d).\n",(int)hiprandStatus_t);
mexErrMsgTxt(buffer);
}
randStatus = hiprandGenerateSeeds(curandGen);
if(randStatus != HIPRAND_STATUS_SUCCESS) {
char buffer [50];
sprintf(buffer, "Error random number generating seed (%d).\n",(int)hiprandStatus_t);
mexErrMsgTxt(buffer);
}
//hipThreadSetLimit(hipLimitStackSize, 1024);
//setup initial particle positions
int blockSize , nBlocks;
int blockSizeT, nBlocksT;
int blockSizeN, nBlocksN;
blockSizeT = 4;
nBlocksT = TT/blockSizeT + ((TT%blockSizeT==0)?0:1);
blockSizeN = 1;
nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error before kcSetupLG ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) {
hipLaunchKernelGGL(( kcSetupLG) , dim3(nBlocksT), dim3(blockSizeT) , 0, 0, y,spe,lg,g,dt,TT);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcSetupLG<<<%d,%d>>> ",nBlocksT,blockSizeT);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
blockSize = 8;
int totalThreads = numParticles*NT;
nBlocks = totalThreads/blockSize + ((totalThreads%blockSize==0)?0:1);
//mexPrintf("Max trial length = %d, blockSizes = %d,%d, nBlocks = %d,%d\n", maxTrialLength,blockSize,blockSizeN,nBlocks,nBlocksN);
//forward pass loop
for (int ii = 0; ii < maxTrialLength;ii++) {
//move all particles forward
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN,randSize); //random sample steps for all particles
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
int currDev;
hipGetDevice(&currDev);
mexPrintf("Error synchronizing post-rand draw 1 Size=%d ii=%d, current device=%d ",randSize,ii,currDev);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in particle propagation. Size=%d ii=%d ",randSize,ii);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
hipLaunchKernelGGL(( kcMoveParticles) , dim3(nBlocks), dim3(blockSize) , 0, 0, y,spe,pos,wt, b_gpu,betaIdxVector,l_0,g,w,dt,randN, sigMult,log_li,lw,lw2,ncdf, posc, trIdx, NT, TT, numParticles, ii);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
int currDev;
hipGetDevice(&currDev);
mexPrintf("Error after kcMoveParticles<<<%d,%d>>> ii=%d/%d, dev=%d ",nBlocks,blockSize,ii,maxTrialLength,currDev);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//normalize weights
hipLaunchKernelGGL(( kcNormalizeWeights) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, y,wt,wt_p, lw, lw2, nEff, cumsum, trIdx, NT, TT, numParticles, ii);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcNormalizeWeights<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//check effective num particles, resample when necessary
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSize);
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in resampler. ii=%d/%d ",ii,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
hipLaunchKernelGGL(( kcResampleParticles) , dim3(nBlocks), dim3(blockSize) , 0, 0, y,pos,posc,wt,log_li,wt_p, minEffParticles,cumsum,nEff,randN,p_cet_0,p_cgt_0a,p_cgt_0b,ncdf,trIdx, NT, TT, numParticles, ii);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcResampleParticles<<<%d,%d>>> ii=%d/%d ",nBlocks,blockSize,ii,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//move passage density forward
//__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, int * trIdx, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int t, int NT, int TT, int numParticles) {
hipLaunchKernelGGL(( kcPropogateBoundaryDensity) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, y,p_clt,p_cet,p_cgt,p_clte,p_cpr,p_cet_0,p_cgt_0a, p_cgt_0b, lg, nEff, minEffParticles, cumsum,trIdx, NT, TT, numParticles, ii);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcPropogateBoundaryDensity<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
//backwards sample the particles
for (int jj = maxTrialLength-1; jj >= 0; jj--) {
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSizeS);
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in backwards sampler (1). jj=%d/%d ",jj,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randNs,randSizeS);
//ce = hipDeviceSynchronize();
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in backwards sampler (2). jj=%d/%d ",jj,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error synchronizing before kcBackwardsSample (post random generation) jj=%d/%d ",jj,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
hipLaunchKernelGGL(( kcBackwardsSample) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, lambdaTarget, auxiliaryTarget, pos, wt, ncdf, b_gpu, betaIdxVector, l_0, w, g, p_cpr, p_clte, randN, randNs, wt_p, cumsum, trIdx, NT, TT, numParticles, jj);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcBackwardsSample<<<%d,%d>>> jj=%d/%d ",nBlocksN,blockSizeN,jj,maxTrialLength);
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randTs, randSizeT);
//ce = hipDeviceSynchronize();
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in final sampler (2). ");
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error synchronizing before kcForwardFinalPass (post random generation) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//samples all latent variables beyond bound hit time
hipLaunchKernelGGL(( kcForwardFinalPass) , dim3(nBlocksN),dim3(blockSizeN) , 0, 0, lambdaTarget, auxiliaryTarget, randTs, b_gpu, betaIdxVector, l_0, w, trIdx, NT, beta_sum);
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error after kcForwardFinalPass ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//gathers statistics of the sampled latent paths needed to sample the drift rates
KC_FP_TYPE * sampling_c;
KC_FP_TYPE * sampling_p;
checkCudaErrors(hipMalloc((void**)&sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1)));
checkCudaErrors(hipMalloc((void**)&sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1)));
checkCudaErrors(hipMemcpy(sampling_c,(KC_FP_TYPE*)mxGetPr(prhs[14]), sizeof(KC_FP_TYPE)*(numBetas+1),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(sampling_p,(KC_FP_TYPE*)mxGetPr(prhs[15]), sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kcAssembleSamplingStatistics), dim3(1),dim3(1), 0, 0, sampling_p, sampling_c, lambdaTarget, auxiliaryTarget, beta_sum,betaIdxVector,l_0, w, trIdx, NT, numBetas);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(prhs[14]),sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(prhs[15]),sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),hipMemcpyDeviceToHost));
//free up memory
cre = hiprandDestroyGenerator(curandGen);
if(cre != HIPRAND_STATUS_SUCCESS) {
mexPrintf("Error destroying rand generator (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error synchronizing post-rand generator destruction (particleFilter) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
checkCudaErrors(hipFree(b_gpu));
checkCudaErrors(hipFree(p_clte));
checkCudaErrors(hipFree(p_cet));
checkCudaErrors(hipFree(p_cgt));
checkCudaErrors(hipFree(p_clt));
checkCudaErrors(hipFree(p_cpr));
checkCudaErrors(hipFree(pos));
checkCudaErrors(hipFree(wt));
ce = hipFree(wt_p);
if(ce != hipSuccess) {
mexPrintf("Error freeing memory in particle filter (wt_p) ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
checkCudaErrors(hipFree(log_li));
checkCudaErrors(hipFree(posc));
checkCudaErrors(hipFree(lw));
checkCudaErrors(hipFree(lw2));
checkCudaErrors(hipFree(ncdf));
checkCudaErrors(hipFree(p_cet_0));
checkCudaErrors(hipFree(p_cgt_0a));
checkCudaErrors(hipFree(p_cgt_0b));
checkCudaErrors(hipFree(lg));
checkCudaErrors(hipFree(cumsum));
checkCudaErrors(hipFree(beta_sum));
checkCudaErrors(hipFree(sampling_c));
checkCudaErrors(hipFree(sampling_p));
checkCudaErrors(hipFree(nEff));
checkCudaErrors(hipFree(randN));
checkCudaErrors(hipFree(randNs));
checkCudaErrors(hipFree(randTs));
ce = hipDeviceSynchronize();
if(ce != hipSuccess) {
mexPrintf("Error at the end of the particle filter ");
mexPrintf(hipGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
| 02cbc556038e3ce2fc301998520162a651be1a1a.cu |
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
//KC_FP_TYPE can be assumed to mean "double", but originally
//this definition could also work with "float" for faster speed.
//float compatability is no longer supported in this function.
#include "kcArrayFunctions.h"
#define MAX_P 1e25
#define MIN_P 1e-25
__device__ KC_FP_TYPE positiveBound(KC_FP_TYPE a) {
//return a;
if(isinf(a))
return MAX_P;
else
return fmin(fmax(a,MIN_P),MAX_P);
}
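//h: softplus firing-rate nonlinearity scaled by the spike-history gain exp(sh) and bin width dt;
//hinv inverts the map y = dt*log1p(exp(gamma*z + sh)) (an exact inverse of h only when sh == 0)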
__device__ KC_FP_TYPE h(KC_FP_TYPE z, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh) {
return log1p(exp(z*gamma))*exp(sh)*dt;
}
__device__ KC_FP_TYPE hinv(KC_FP_TYPE y, KC_FP_TYPE gamma, KC_FP_TYPE dt, KC_FP_TYPE sh) {
return (log(exp(y/dt)-1.0)-sh)/gamma;
}
//one thread per particle <<< nTrials,nParticles >>>
__global__ void kcMoveParticles(KC_FP_TYPE * y, KC_FP_TYPE * spe, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * b, int * betaIdxVector, KC_FP_TYPE l_0, KC_FP_TYPE g, KC_FP_TYPE w, KC_FP_TYPE dt, KC_FP_TYPE * randN, KC_FP_TYPE sigMult, KC_FP_TYPE * log_li, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * ncdf, KC_FP_TYPE * posc, int * trIdx, int NT, int TT, int numParticles, int t) {
int threadNum = blockIdx.x*blockDim.x + threadIdx.x;
int tr_num = (int)threadNum / (int)numParticles;
int p_num = threadNum % numParticles;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
KC_FP_TYPE cb = b[betaIdxVector[row]];
KC_FP_TYPE sw = sqrt(w);
KC_FP_TYPE mup = (t==0)?(l_0):(pos[idx-1]+cb);
KC_FP_TYPE mu = mup;
KC_FP_TYPE sig2 = sigMult*w;
KC_FP_TYPE sig = sqrt(sig2);
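//proposal: inverse-CDF draw from a normal truncated above at 1, with variance inflated by sigMult;
//the importance weights lw/lw2 below correct for the prior/proposal mismatch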
KC_FP_TYPE maxI = fmin(1.0-1e-20, fmax( normcdf((1.0-mu)/sig),1e-20 ));
pos[idx] = fmin(1.0-1e-20, normcdfinv(maxI*randN[pidx])*sig + mu);
posc[pidx] = pos[idx];
KC_FP_TYPE dpos = pos[idx]-mu;
KC_FP_TYPE log_pi_k = -log(maxI)-0.5*log(2.0*M_PI*sig2) - 0.5/sig2*(dpos*dpos);
//to be stored for each particle: ncdf, lw, lw2
ncdf[idx] = normcdf((1-mup)/sw);
KC_FP_TYPE dposp = pos[idx]-mup;
KC_FP_TYPE log_p = -0*log(maxI) -0.5*log(2*M_PI*w)- 0.5/w*(dposp*dposp);
log_li[pidx] = -h(pos[idx],g,dt,spe[row])+y[row]*(log(fmax(h(pos[idx],g,1.0,spe[row]),1e-30))+log(dt))-lgamma(y[row]+1);
KC_FP_TYPE pw = (t==0)?(log(1/(KC_FP_TYPE)numParticles) ):( log(fmax(wt[idx-1], 1e-30)) );
lw[pidx] = exp(pw+log_p+log_li[pidx]-log_pi_k);
lw2[pidx] = exp(pw+log_p -log_pi_k);
//safety checks for numerical errors
if(isnan(lw[pidx]) || isinf(lw[pidx]) || isnan(pos[idx]) || isinf(pos[idx]) || isnan(lw2[pidx]) || isinf(lw2[pidx])) {
lw[pidx] = 0;
lw2[pidx] = 0;
pos[idx] = mup;
posc[pidx] = mup;
}
}
}
}
//one thread per trial <<< nTrials,1 >>>
__global__ void kcNormalizeWeights(KC_FP_TYPE * y, KC_FP_TYPE * wt, KC_FP_TYPE * wt_p, KC_FP_TYPE * lw, KC_FP_TYPE * lw2, KC_FP_TYPE * nEff, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
//sum up and normalize weights
KC_FP_TYPE weightSum = 0;
KC_FP_TYPE weightSum2 = 0;
for(int p_num = 0; p_num < numParticles; p_num++) {
int pidx = tr_num*numParticles+p_num;
weightSum += lw[pidx];
weightSum2 += lw2[pidx];
}
KC_FP_TYPE n_eff_den = 0;
weightSum = fmax(weightSum,1e-20);
weightSum2 = fmax(weightSum2,1e-20);
for(int p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt[idx] = lw[pidx] /weightSum;
wt_p[pidx] = lw2[pidx]/weightSum2;
n_eff_den += wt[idx]*wt[idx];
cumsum[pidx] = (p_num>0)?(cumsum[pidx-1]+wt[idx]):(wt[idx]);//for resampling
}
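//effective sample size: nEff = 1 / sum_i wt_i^2 (equals numParticles for uniform weights, 1 when degenerate)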
nEff[tr_num] = 1/n_eff_den;
}
}
}
//initial calculation - probability of each spike count coming from a rate at the bound
__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) {
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx < TT) {
lg[idx] = exp( -h(1,g, dt,spe[idx]) + y[idx]*log(fmax(h(1,g,dt,spe[idx]),1e-30)) - lgamma(y[idx]+1));
}
}
//one thread per particle <<< nTrials,nParticles >>>
// if particles look bad, resamples them from the distribution before the next step
__global__ void kcResampleParticles(KC_FP_TYPE * y, KC_FP_TYPE * pos, KC_FP_TYPE * posc, KC_FP_TYPE * wt, KC_FP_TYPE * log_li, KC_FP_TYPE * wt_p, int minEffParticles, KC_FP_TYPE * cumsum, KC_FP_TYPE * nEff, KC_FP_TYPE * randU, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * ncdf, int * trIdx, int NT, int TT, int numParticles, int t) {
int threadNum = blockIdx.x*blockDim.x + threadIdx.x;
int tr_num = (int)threadNum / (int)numParticles;
int p_num = threadNum % numParticles;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int pidx = tr_num*numParticles+p_num;
int row = trIdx[tr_num] + t;
int idx = TT*p_num + row;
int pidx_new = pidx;
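//if the effective particle count has collapsed, multinomial-resample by inverting the per-trial weight CDF (cumsum) with a uniform draw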
if(nEff[tr_num] < minEffParticles) {
int p_num_new;
for(p_num_new = 0; p_num_new < numParticles-1 && randU[pidx] > cumsum[numParticles*tr_num+p_num_new]; p_num_new++) {
//everything taken care of in loop statement
}
pidx_new = tr_num*numParticles+p_num_new;
wt[idx] = 1.0/(KC_FP_TYPE)numParticles; //weights are now uniform again
pos[idx] = posc[pidx_new];
}
KC_FP_TYPE wt_old = (t==0)?(1.0/(KC_FP_TYPE)numParticles):(wt[idx-1]);
p_cet_0[pidx] = (1.0-ncdf[idx])*wt_old;
p_cgt_0a[pidx] = exp(log_li[pidx])*wt_p[pidx]; //or pidx_new?
p_cgt_0b[pidx] = ncdf[idx]*wt_old;
}
}
}
//one thread per trial <<< nTrials,1 >>>
//move bound crossing probabilities forward in time
__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
if(t < trLength) {
int row = trIdx[tr_num] + t;
KC_FP_TYPE p_cet_s = 0;
KC_FP_TYPE p_cgt_sa = 0;
KC_FP_TYPE p_cgt_sb = 0;
for(int p_num = 0; p_num < numParticles; p_num++) {
int pidx = tr_num*numParticles+p_num;
//int idx = TT*p_num + row;
p_cet_s += p_cet_0[pidx];
p_cgt_sa += p_cgt_0a[pidx];
p_cgt_sb += p_cgt_0b[pidx];
//remainder of the resampler that must run after sampling (so the parallelization stays correct); only used again if this is the last timestep of the trial
if(nEff[tr_num] < minEffParticles && t-1==trLength) {
cumsum[pidx] = 1/(KC_FP_TYPE)numParticles*(1+p_num);
}
}
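//combine the bound likelihood lg with the particle estimates and renormalize the crossing-time probabilities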
KC_FP_TYPE p_clte_old = ((t==0)?(0):(p_clte[row-1]));
KC_FP_TYPE p_cgt_old = ((t==0)?(1):(p_cgt[row-1]));
KC_FP_TYPE p_clt_1 = lg[row]*p_clte_old;
KC_FP_TYPE p_cet_1 = lg[row]*(1.0-p_clte_old)*p_cet_s;
KC_FP_TYPE p_cgt_1 = (1.0-p_clte_old)*p_cgt_sa*p_cgt_sb;
p_cet[row] = p_cet_1/(p_clt_1+p_cet_1+p_cgt_1);
p_clte[row] = (p_cet_1+p_clt_1)/(p_clt_1+p_cet_1+p_cgt_1); //this is a little redundant, but it is convenient later
p_clt[row] = p_clt_1/(p_clt_1+p_cet_1+p_cgt_1);
p_cgt[row] = p_cgt_1/(p_clt_1+p_cet_1+p_cgt_1);
p_cpr[row] = p_cgt_old*p_cet_s; //compare this index in MATLAB code
}
}
}
//Finally do that backwards sampling, <<< NT, 1 >>>
__global__ void kcBackwardsSample(KC_FP_TYPE * sample, int * crossingTimes, KC_FP_TYPE * pos, KC_FP_TYPE * wt, KC_FP_TYPE * ncdf, KC_FP_TYPE * b, int * betaIdx, KC_FP_TYPE l_0, KC_FP_TYPE w, KC_FP_TYPE g, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_clte, KC_FP_TYPE * randUp, KC_FP_TYPE * randUb, KC_FP_TYPE * wt_p, KC_FP_TYPE * cumsum, int * trIdx, int NT, int TT, int numParticles, int t) {
int tr_num = blockIdx.x*blockDim.x + threadIdx.x;
if(tr_num < NT) {
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
int row = trIdx[tr_num] + t;
if(t == trLength-1) {
//if t=end of trial, start off the backwards sampling
crossingTimes[tr_num] = trLength;
//decide whether end trial has hit boundary
if(randUb[tr_num] < p_clte[row]) {
sample[row] = 1;
crossingTimes[tr_num] = t;
}
//else select a particle to be end of trial (cumsum holds the CDF of the distribution over particles)
else {
int p_num;
for(p_num = 0; p_num < numParticles-1 && randUp[tr_num] > cumsum[numParticles*tr_num+p_num]; p_num++) {
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
else if(t < trLength-1 && t >= 0) {
//else, propagate backwards
//if previous sample had hit threshold
if(sample[row+1] >= 1) {
//if boundary already reached
if(randUb[tr_num] < p_clte[row]/(p_cpr[row+1] + p_clte[row])) {
crossingTimes[tr_num] = t;
sample[row] = 1;
}
//gets pre-crossing particle
else {
KC_FP_TYPE wtSum = 0;
int p_num;
for(p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt_p[pidx] = wt[idx]*fmax(1.0-ncdf[idx+1],1e-25);
wtSum += wt_p[pidx];
}
wtSum = fmax(wtSum,1e-30);
KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum;
for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) {
int pidx = tr_num*numParticles+p_num+1;
csum += wt_p[pidx]/wtSum;
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
//else, samples a particle
else {
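//backwards-sampling weights: forward filter weight times the Gaussian transition density to the already-sampled lambda at t+1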
KC_FP_TYPE wtSum = 0;
int p_num;
for(p_num = 0; p_num < numParticles; p_num++) {
int idx = TT*p_num + row;
int pidx = tr_num*numParticles+p_num;
wt_p[pidx] = wt[idx]*exp(-0.5/w*pow( sample[row+1] - (pos[idx] + b[betaIdx[row]]),2 ));
wtSum += wt_p[pidx];
}
wtSum = fmax(wtSum,1e-30);
KC_FP_TYPE csum = wt_p[tr_num*numParticles+0]/wtSum;
for(p_num = 0; p_num < numParticles-1 && csum < randUp[tr_num]; p_num++) {
int pidx = tr_num*numParticles+p_num+1;
csum += wt_p[pidx]/wtSum;
}
int idx = TT*p_num + row;
sample[row] = pos[idx];
}
}
}
}
/*
Performs a forward sweep of the path after backwards sampling
Draws from prior for steps post-threshold crossing (for conjugate sampling of parameters)
Calculates some statistics for later sampling
trial number given by CUDA thread
*/
__global__ void kcForwardFinalPass( KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * randUni, const KC_FP_TYPE* b, const int * betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx,const int NT, KC_FP_TYPE * beta_sum) {
int tr_num = blockIdx.x*blockDim.x+threadIdx.x;
if(tr_num < NT) {
int t_0 = trIdx[tr_num];
beta_sum[tr_num] = 0;
int trLength = trIdx[tr_num+1] - trIdx[tr_num];
KC_FP_TYPE cb = b[betaIndVec[t_0]];
for(int t = 0; t < trLength; t++) {
if(t == crossingTimes[tr_num]) {
//samples the first value of lambda to cross the bound (truncated normal, > 1)
KC_FP_TYPE mu = (t > 0)?(lambda[t_0 + t-1]+cb):l_0;
KC_FP_TYPE minS = normcdf((1-mu)/sqrt(w));
if(minS >= 1.0-1e-5) {
lambda[t_0 + t] = 1;
}
else {
lambda[t_0 + t] = mu+sqrt(w)*normcdfinv( minS + (1-minS)*randUni[t_0+t]);
}
}
else if(t > crossingTimes[tr_num]) {
lambda[t_0 + t] = lambda[t_0 + t - 1] + cb + KC_SQRT(w)*normcdfinv( randUni[t_0+t]);
}
beta_sum[tr_num] += (t>0 && t <= crossingTimes[tr_num])?(lambda[t_0 + t] - lambda[t_0 + t-1]):0; //only include lambdas up until first threshold crossing to look at drift rates
}
}
}
//single thread kernel to assemble stats of the ramps across trials for sampling beta,l_0
__global__ void kcAssembleSamplingStatistics(KC_FP_TYPE * sigMat, KC_FP_TYPE * muVec, const KC_FP_TYPE* lambda, const int * crossingTimes, const KC_FP_TYPE * beta_sum,const int*betaIndVec,const KC_FP_TYPE l_0, const KC_FP_TYPE w, const int* trIdx, const int NT, const int numBetas) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx == 0) {
for(int trNum = 0; trNum < NT; trNum++) {
int t_0 = trIdx[trNum];
int cb = betaIndVec[t_0];
int trLength = trIdx[trNum+1] - trIdx[trNum];
sigMat[(cb)*(numBetas+1) + cb] += fmin(1.0*crossingTimes[trNum],trLength-1.0)/w;
sigMat[(numBetas)*(numBetas+1) + numBetas] += 1.0/w;
muVec[cb] += beta_sum[trNum]/w;
muVec[numBetas] += lambda[t_0]/w;
}
}
}
//Samples a single set of latent paths from the ramping model for a set of trials given fixed parameters
//args
// 0 = new lambda (output, should be pre-allocated on GPU, same size as y)
// 1 = new auxiliary variable for threshold crossing (output, should be pre-allocated on GPU, vector of length number of trials)
// 2 = y (observations)
// 3 = trIdx (array of trial start indices into y, 0-indexed; includes a final value equal to the length of y)
// 4 = betaIdxVector (array giving the coherence/beta index used at each bin of y; values begin at 0 instead of 1 to be consistent with C, unlike MATLAB)
// 5 = betas (the beta values)
// 6 = w (variance of diffusion process)
// 7 = l_0 (starting lambda value)
// 8 = g (absorbing boundary effective height)
// 9 = dt (bin/timestep size)
// 10 = numParticles
// 11 = minEffParticles (how many effective particles per trial to keep around)
// 12 = sigMult (used for particle proposals, proposal variance is sigMult*w)
// 13 = maxTrialLength
// 14 = beta/l_0 sampling vec param c (uses this as output for sampling betas, l_0)
// 15 = beta/l_0 sampling vec param p uses this as output for sampling betas, l_0)
// 16 = spike history effect
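// A hypothetical MATLAB-side call (the mex name and variable names below are
// illustrative only; they are not defined in this file):
// kcParticleFilter(lambdaGPU, crossGPU, yGPU, trIdxGPU, betaIdxGPU, betas, ...
// w, l_0, g, dt, 200, 50, 1.5, maxTrialLength, c, p, speGPU);
// lambdaGPU/crossGPU are preallocated device arrays written in place; c and p are
// the sampling-statistic buffers (length numBetas+1 and (numBetas+1)^2) that are
// read in and updated by this function.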
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
cudaError_t ce;
curandStatus_t cre;
/*ce = cudaSetDevice(KC_GPU_DEVICE);
if(ce != cudaSuccess) {
mexPrintf("Error initializing device (kcParticleFilterProp.cu) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}*/
//init data
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * lambdaTarget = kcGetArrayData(prhs[0]);
int * auxiliaryTarget = kcGetArrayDataInt(prhs[1]);
KC_FP_TYPE * y = kcGetArrayData(prhs[2],TT);
int * trIdx = kcGetArrayDataInt(prhs[3]);
unsigned int NT = kcGetArrayNumEl(prhs[3])-1;
int * betaIdxVector = kcGetArrayDataInt(prhs[4]);
KC_FP_TYPE * b = mxGetPr(prhs[5]);
int numBetas = mxGetNumberOfElements(prhs[5]);
KC_FP_TYPE * b_gpu;
ce = cudaMalloc((void**)&b_gpu,sizeof(KC_FP_TYPE)*numBetas);
if(ce != cudaSuccess) {
mexPrintf("Error allocating space for betas on GPU - first allocation in function (particle filter) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
ce = cudaMemcpy(b_gpu,b,sizeof(KC_FP_TYPE)*numBetas,cudaMemcpyHostToDevice);
if(ce != cudaSuccess) {
mexPrintf("Error moving betas to GPU (particle filter) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
KC_FP_TYPE w = mxGetScalar(prhs[6]);
KC_FP_TYPE l_0 = mxGetScalar(prhs[7]);
KC_FP_TYPE g = mxGetScalar(prhs[8]);
KC_FP_TYPE dt = mxGetScalar(prhs[9]);
int numParticles = mxGetScalar(prhs[10]);
int minEffParticles = mxGetScalar(prhs[11]);
int sigMult = mxGetScalar(prhs[12]);
int maxTrialLength = mxGetScalar(prhs[13]);
//load spike history effect
KC_FP_TYPE * spe = kcGetArrayData(prhs[16],TT);
//particle weights/probabilities of hitting the bound
KC_FP_TYPE * p_clte;
KC_FP_TYPE * p_cet;
KC_FP_TYPE * p_cgt;
KC_FP_TYPE * p_clt;
KC_FP_TYPE * p_cpr;
checkCudaErrors(cudaMalloc((void**)&p_clte, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cet, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cgt, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_clt, TT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cpr, TT*sizeof(KC_FP_TYPE)));
KC_FP_TYPE * wt;
KC_FP_TYPE * wt_p;
KC_FP_TYPE * pos;//particle positions
checkCudaErrors(cudaMalloc((void**)&wt, (TT)*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&wt_p, (NT)*numParticles*sizeof(KC_FP_TYPE)));
ce = cudaMalloc((void**)&pos, (TT)*numParticles*sizeof(KC_FP_TYPE));
if(ce != cudaSuccess) {
mexPrintf("Error allocating pos ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
KC_FP_TYPE * log_li;
KC_FP_TYPE * posc; //for resampling
KC_FP_TYPE * lw; //unnormalized weights
KC_FP_TYPE * lw2;
KC_FP_TYPE * ncdf;
KC_FP_TYPE * p_cet_0;
KC_FP_TYPE * p_cgt_0a;
KC_FP_TYPE * p_cgt_0b;
KC_FP_TYPE * lg; //log p(y|at boundary)
KC_FP_TYPE * cumsum;
KC_FP_TYPE * beta_sum;
checkCudaErrors(cudaMalloc((void**)&log_li, NT*numParticles*sizeof(KC_FP_TYPE)));
//checkCudaErrors(cudaMalloc((void**)&log_lic, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&posc, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&lw, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&lw2, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&ncdf, TT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cet_0, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cgt_0a, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&p_cgt_0b, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&cumsum, NT*numParticles*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&beta_sum, NT*sizeof(KC_FP_TYPE)));
checkCudaErrors(cudaMalloc((void**)&lg, TT*sizeof(KC_FP_TYPE)));
KC_FP_TYPE * nEff;
checkCudaErrors(cudaMalloc((void**)&nEff, NT*sizeof(KC_FP_TYPE)));
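//round odd request lengths up to even: cuRAND normal generators require counts
//that are a multiple of 2 (stated here as the likely reason for this padding)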
int randSize = (NT*numParticles) + ((NT*numParticles)%2==0?0:1);
int randSizeS = (NT) + (NT%2==0?0:1);
int randSizeT = (TT) + (TT%2==0?0:1);
KC_FP_TYPE * randN;
KC_FP_TYPE * randNs;
KC_FP_TYPE * randTs;
ce = cudaMalloc((void**)&randN, randSize *sizeof(KC_FP_TYPE));
if(ce != cudaSuccess) {
mexPrintf("Error allocating randN ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
ce = cudaMalloc((void**)&randNs, randSizeS*sizeof(KC_FP_TYPE));
if(ce != cudaSuccess) {
mexPrintf("Error allocating randNs ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
ce = cudaMalloc((void**)&randTs, randSizeT*sizeof(KC_FP_TYPE));
if(ce != cudaSuccess) {
mexPrintf("Error allocating randTs ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
}
//setup the random number generator
curandGenerator_t curandGen = 0;
curandStatus_t curandStatus;
curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT);
if(curandStatus != CURAND_STATUS_SUCCESS) {
char buffer [256];
sprintf(buffer, "Error initializing random number generator (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
struct timeval now;
gettimeofday(&now,NULL);
unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
//curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, (unsigned int)time(NULL));
if(curandStatus != CURAND_STATUS_SUCCESS) {
char buffer [256];
sprintf(buffer, "Error setting the random number generator seed (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
curandStatus = curandGenerateSeeds(curandGen);
if(curandStatus != CURAND_STATUS_SUCCESS) {
char buffer [256];
sprintf(buffer, "Error generating random number seeds (%d).\n",(int)curandStatus);
mexErrMsgTxt(buffer);
}
//cudaThreadSetLimit(cudaLimitStackSize, 1024);
//setup initial particle positions
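//launch geometry: TT-sized kernels use one thread per time bin, NT-sized kernels one thread per trial; ceil-divide so all elements are covered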
int blockSize , nBlocks;
int blockSizeT, nBlocksT;
int blockSizeN, nBlocksN;
blockSizeT = 4;
nBlocksT = TT/blockSizeT + ((TT%blockSizeT==0)?0:1);
blockSizeN = 1;
nBlocksN = NT/blockSizeN + ((NT%blockSizeN==0)?0:1);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error before kcSetupLG ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//__global__ void kcSetupLG(KC_FP_TYPE * y,KC_FP_TYPE * spe,KC_FP_TYPE * lg,KC_FP_TYPE g, KC_FP_TYPE dt,int TT) {
kcSetupLG <<< nBlocksT, blockSizeT >>> (y,spe,lg,g,dt,TT);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcSetupLG<<<%d,%d>>> ",nBlocksT,blockSizeT);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
blockSize = 8;
int totalThreads = numParticles*NT;
nBlocks = totalThreads/blockSize + ((totalThreads%blockSize==0)?0:1);
//mexPrintf("Max trial length = %d, blockSizes = %d,%d, nBlocks = %d,%d\n", maxTrialLength,blockSize,blockSizeN,nBlocks,nBlocksN);
//forward pass loop
for (int ii = 0; ii < maxTrialLength;ii++) {
//move all particles forward
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN,randSize); //random sample steps for all particles
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
int currDev;
cudaGetDevice(&currDev);
mexPrintf("Error synchronizing post-rand draw 1 Size=%d ii=%d, current device=%d ",randSize,ii,currDev);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in particle propagation. Size=%d ii=%d ",randSize,ii);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
kcMoveParticles <<< nBlocks, blockSize >>> (y,spe,pos,wt, b_gpu,betaIdxVector,l_0,g,w,dt,randN, sigMult,log_li,lw,lw2,ncdf, posc, trIdx, NT, TT, numParticles, ii);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
int currDev;
cudaGetDevice(&currDev);
mexPrintf("Error after kcMoveParticles<<<%d,%d>>> ii=%d/%d, dev=%d ",nBlocks,blockSize,ii,maxTrialLength,currDev);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//normalize weights
kcNormalizeWeights <<< nBlocksN,blockSizeN >>> (y,wt,wt_p, lw, lw2, nEff, cumsum, trIdx, NT, TT, numParticles, ii);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcNormalizeWeights<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//check effective num particles, resample when necessary
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSize);
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in resampler. ii=%d/%d ",ii,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
kcResampleParticles <<< nBlocks, blockSize >>> (y,pos,posc,wt,log_li,wt_p, minEffParticles,cumsum,nEff,randN,p_cet_0,p_cgt_0a,p_cgt_0b,ncdf,trIdx, NT, TT, numParticles, ii);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcResampleParticles<<<%d,%d>>> ii=%d/%d ",nBlocks,blockSize,ii,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//move passage density forward
//__global__ void kcPropogateBoundaryDensity(KC_FP_TYPE * y, KC_FP_TYPE * p_clt, KC_FP_TYPE * p_cet, KC_FP_TYPE * p_cgt, KC_FP_TYPE * p_clte, KC_FP_TYPE * p_cpr, KC_FP_TYPE * p_cet_0, KC_FP_TYPE * p_cgt_0a, KC_FP_TYPE * p_cgt_0b, KC_FP_TYPE * lg, int * trIdx, KC_FP_TYPE * nEff, int minEffParticles, KC_FP_TYPE * cumsum, int t, int NT, int TT, int numParticles) {
kcPropogateBoundaryDensity <<< nBlocksN,blockSizeN >>> (y,p_clt,p_cet,p_cgt,p_clte,p_cpr,p_cet_0,p_cgt_0a, p_cgt_0b, lg, nEff, minEffParticles, cumsum,trIdx, NT, TT, numParticles, ii);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcPropogateBoundaryDensity<<<%d,%d>>> ii=%d/%d ",nBlocksN,blockSizeN,ii,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
//backwards sample the particles
for (int jj = maxTrialLength-1; jj >= 0; jj--) {
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randN, randSizeS);
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in backwards sampler (1). jj=%d/%d ",jj,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randNs,randSizeS);
//ce = cudaDeviceSynchronize();
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in backwards sampler (2). jj=%d/%d ",jj,maxTrialLength);
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error synchronizing before kcBackwardsSample (post random generation) jj=%d/%d ",jj,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
kcBackwardsSample <<< nBlocksN,blockSizeN >>> (lambdaTarget, auxiliaryTarget, pos, wt, ncdf, b_gpu, betaIdxVector, l_0, w, g, p_cpr, p_clte, randN, randNs, wt_p, cumsum, trIdx, NT, TT, numParticles, jj);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcBackwardsSample<<<%d,%d>>> jj=%d/%d ",nBlocksN,blockSizeN,jj,maxTrialLength);
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
cre = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randTs, randSizeT);
//ce = cudaDeviceSynchronize();
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error after rand generation in final sampler (2). ");
mexPrintf(" (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error synchronizing before kcForwardFinalPass (post random generation) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//samples all latent variables beyond bound hit time
kcForwardFinalPass <<< nBlocksN,blockSizeN >>> (lambdaTarget, auxiliaryTarget, randTs, b_gpu, betaIdxVector, l_0, w, trIdx, NT, beta_sum);
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error after kcForwardFinalPass ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
//gathers statistics of the sampled latent paths needed to sample the drift rates
KC_FP_TYPE * sampling_c;
KC_FP_TYPE * sampling_p;
checkCudaErrors(cudaMalloc((void**)&sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1)));
checkCudaErrors(cudaMalloc((void**)&sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1)));
checkCudaErrors(cudaMemcpy(sampling_c,(KC_FP_TYPE*)mxGetPr(prhs[14]), sizeof(KC_FP_TYPE)*(numBetas+1),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(sampling_p,(KC_FP_TYPE*)mxGetPr(prhs[15]), sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),cudaMemcpyHostToDevice));
kcAssembleSamplingStatistics<<<1,1>>>(sampling_p, sampling_c, lambdaTarget, auxiliaryTarget, beta_sum,betaIdxVector,l_0, w, trIdx, NT, numBetas);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(prhs[14]),sampling_c, sizeof(KC_FP_TYPE)*(numBetas+1),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(prhs[15]),sampling_p, sizeof(KC_FP_TYPE)*(numBetas+1)*(numBetas+1),cudaMemcpyDeviceToHost));
//free up memory
cre = curandDestroyGenerator(curandGen);
if(cre != CURAND_STATUS_SUCCESS) {
mexPrintf("Error destroying rand generator (%d)\n", (int)cre);
mexErrMsgTxt("CUDA Errors");
}
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error synchronizing post-rand generator destruction (particleFilter) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
checkCudaErrors(cudaFree(b_gpu));
checkCudaErrors(cudaFree(p_clte));
checkCudaErrors(cudaFree(p_cet));
checkCudaErrors(cudaFree(p_cgt));
checkCudaErrors(cudaFree(p_clt));
checkCudaErrors(cudaFree(p_cpr));
checkCudaErrors(cudaFree(pos));
checkCudaErrors(cudaFree(wt));
ce = cudaFree(wt_p);
if(ce != cudaSuccess) {
mexPrintf("Error freeing memory in particle filter (wt_p) ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
checkCudaErrors(cudaFree(log_li));
checkCudaErrors(cudaFree(posc));
checkCudaErrors(cudaFree(lw));
checkCudaErrors(cudaFree(lw2));
checkCudaErrors(cudaFree(ncdf));
checkCudaErrors(cudaFree(p_cet_0));
checkCudaErrors(cudaFree(p_cgt_0a));
checkCudaErrors(cudaFree(p_cgt_0b));
checkCudaErrors(cudaFree(lg));
checkCudaErrors(cudaFree(cumsum));
checkCudaErrors(cudaFree(beta_sum));
checkCudaErrors(cudaFree(sampling_c));
checkCudaErrors(cudaFree(sampling_p));
checkCudaErrors(cudaFree(nEff));
checkCudaErrors(cudaFree(randN));
checkCudaErrors(cudaFree(randNs));
checkCudaErrors(cudaFree(randTs));
ce = cudaDeviceSynchronize();
if(ce != cudaSuccess) {
mexPrintf("Error at the end of the particle filter ");
mexPrintf(cudaGetErrorString(ce));
mexPrintf(" (%d)\n", (int)ce);
mexErrMsgTxt("CUDA Errors");
}
}
|
b630f29cf73acddd8ebfb0dfac0b8194e607c2e8.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Felipe Aramburu <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cudf/binaryop.hpp>
#include <cudf/cudf.h>
#include <utilities/cudf_utils.h>
#include <cudf/functions.h>
#include <cudf/types.h>
#include <iostream>
#include <random>
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <cstring>
#include <tests/utilities/cudf_test_utils.cuh>
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/nvcategory_utils.cuh>
#include <bitmask/legacy/bit_mask.cuh>
// See this header for all of the handling of valids' vectors
#include <tests/utilities/valid_vectors.h>
#include <string/nvcategory_util.hpp>
gdf_column * create_column_ints(int32_t* host_data, gdf_size_type num_rows){
gdf_column * column = new gdf_column{};
int32_t * data;
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int32_t) , 0), RMM_SUCCESS);
CUDA_TRY( hipMemcpy(data, host_data, sizeof(int32_t) * num_rows, hipMemcpyHostToDevice) );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_INT32);
return column;
}
gdf_column * create_column_constant(gdf_size_type num_rows, int value){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int) , 0), RMM_SUCCESS);
hipMemset(data,value,sizeof(int) * num_rows);
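//note: hipMemset fills byte-wise, so 'value' is replicated into every byte of each int
//(acceptable here since the column is only used as a preallocated output placeholder)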
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *) valid,
num_rows,
GDF_INT32);
return column;
}
int32_t* generate_int_data(gdf_size_type num_rows, size_t max_value, bool print=false){
int32_t* host_data = new int32_t[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
host_data[row_index] = std::rand() % max_value;
if(print)
std::cout<<host_data[row_index]<<"\t";
}
if(print)
std::cout<<std::endl;
return host_data;
}
struct NVCategoryTest : public GdfTest
{
gdf_column * create_boolean_column(gdf_size_type num_rows){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int8_t) , 0), RMM_SUCCESS);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *) valid,
num_rows,
GDF_INT8);
return column;
}
gdf_column * create_indices_column(gdf_size_type num_rows){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int) , 0), RMM_SUCCESS);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *) valid,
num_rows,
GDF_INT32);
return column;
}
};
//TODO: refactor tests
TEST_F(NVCategoryTest, TEST_NVCATEGORY_SORTING)
{
bool print = false;
const int rows_size = 64;
const int length = 2;
const char ** string_data = cudf::test::generate_string_data(rows_size, length, print);
gdf_column * column = cudf::test::create_nv_category_column_strings(string_data, rows_size);
gdf_column * output_column = create_indices_column(rows_size);
gdf_column ** input_columns = new gdf_column *[1];
input_columns[0] = column;
if(print){
print_gdf_column(input_columns[0]);
}
int8_t *asc_desc;
EXPECT_EQ(RMM_ALLOC(&asc_desc, 1, 0), RMM_SUCCESS);
int8_t minus_one = -1; //desc
hipMemset(asc_desc, minus_one, 1);
gdf_context context;
context.flag_null_sort_behavior = GDF_NULL_AS_LARGEST;
//doesn't output NVCategory-typed columns, so this works as-is
gdf_error err = gdf_order_by(input_columns, asc_desc, 1, output_column, &context);
EXPECT_EQ(GDF_SUCCESS, err);
if(print){
print_gdf_column(output_column);
}
int* host_data = new int[rows_size];
CUDA_TRY( hipMemcpy(
host_data,
output_column->data,
sizeof(int) * output_column->size,
hipMemcpyDeviceToHost) );
std::vector<std::string> strings_vector(string_data, string_data + rows_size);
for(size_t i = 0; i < rows_size - 1; i++){
EXPECT_TRUE(strings_vector[host_data[i]] >= strings_vector[host_data[i+1]]);
}
}
// Selects the kind of aggregation operation that is performed
enum struct agg_op
{
MIN,//0
MAX,//1
SUM,//2
CNT,//3
AVG //4
};
template <agg_op op>
struct AggOp {
template <typename T>
T operator()(const T a, const T b) {
return static_cast<T>(0);
}
template <typename T>
T operator()(const T a) {
return static_cast<T>(0);
}
};
template<>
struct AggOp<agg_op::MIN> {
template <typename T>
T operator()(const T a, const T b) {
return (a < b)? a : b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::MAX> {
template <typename T>
T operator()(const T a, const T b) {
return (a > b)? a : b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::SUM> {
template <typename T>
T operator()(const T a, const T b) {
return a + b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::CNT> {
size_t count{0};
template <typename T>
T operator()(const T a, const T b) {
count = a+1;
return count;
}
template <typename T>
T operator()(const T a) {
count = 1;
return count;
}
};
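// Illustrative behavior of the functors above: AggOp<agg_op::MIN>{}(3, 5) == 3,
// AggOp<agg_op::SUM>{}(3, 5) == 8; AggOp<agg_op::CNT> ignores the value and treats
// its first argument as the running count, so folding it over a group counts rows.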
struct NVCategoryGroupByTest : public GdfTest
{
using output_t = int32_t;
using map_t = std::map<std::string, output_t>;
const int length = 1;
std::vector<std::string> input_key;
std::vector<output_t> input_value;
std::vector<std::string> output_key;
std::vector<output_t> output_value;
gdf_context ctxt = {0, GDF_HASH, 1};
// Containers for the raw pointers to the gdf_columns that will be used as input
// to the gdf_group_by functions
std::vector<gdf_column*> gdf_raw_input_key_columns;
gdf_column* gdf_raw_input_val_column;
std::vector<gdf_column*> gdf_raw_output_key_columns;
gdf_column* gdf_raw_output_val_column;
void copy_output(gdf_column* group_by_output_key, std::vector<std::string>& output_key,
gdf_column* group_by_output_value, std::vector<output_t>& output_value){
const size_t keys_size = group_by_output_key->size;
NVStrings * temp_strings = static_cast<NVCategory *>(group_by_output_key->dtype_info.category)->gather_strings(
(nv_category_index_type *) group_by_output_key->data, keys_size, DEVICE_ALLOCATED );
char** host_strings = new char*[keys_size];
for(size_t i=0;i<keys_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, keys_size);
for(size_t i=0;i<keys_size;i++){
host_strings[i][length]=0;
}
output_key = std::vector<std::string>(host_strings, host_strings + keys_size);
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < keys_size; i++){
delete[] host_strings[i];
}
delete[] host_strings;
output_value.resize(group_by_output_value->size);
CUDA_TRY( hipMemcpy(output_value.data(), group_by_output_value->data, sizeof(output_t) * group_by_output_value->size, hipMemcpyDeviceToHost) );
}
void compute_gdf_result(agg_op op, const gdf_error expected_error = GDF_SUCCESS, bool print=false)
{
const int num_columns = gdf_raw_input_key_columns.size();
gdf_error error{GDF_SUCCESS};
gdf_column **group_by_input_key = gdf_raw_input_key_columns.data();
gdf_column *group_by_input_value = gdf_raw_input_val_column;
gdf_column **group_by_output_key = gdf_raw_output_key_columns.data();
gdf_column *group_by_output_value = gdf_raw_output_val_column;
switch(op)
{
case agg_op::MIN:
{
error = gdf_group_by_min(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::MAX:
{
error = gdf_group_by_max(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::SUM:
{
error = gdf_group_by_sum(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::CNT:
{
error = gdf_group_by_count(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::AVG:
{
error = gdf_group_by_avg(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
default:
error = GDF_INVALID_AGGREGATOR;
}
EXPECT_EQ(expected_error, error) << "The gdf group by function did not complete successfully";
if (GDF_SUCCESS == expected_error ) {
copy_output(
group_by_output_key[0], output_key,
group_by_output_value, output_value);
if (print){
print_gdf_column(group_by_output_key[0]);
print_gdf_column(group_by_output_value);
}
}
}
template <agg_op op>
map_t compute_reference_solution() {
map_t key_val_map;
if (op != agg_op::AVG) {
AggOp<op> agg;
for (size_t i = 0; i < input_value.size(); ++i) {
auto l_key = input_key[i];
auto sch = key_val_map.find(l_key);
if (sch != key_val_map.end()) {
key_val_map[l_key] = agg(sch->second, input_value[i]);
} else {
key_val_map[l_key] = agg(input_value[i]);
}
}
} else {
std::map<std::string, size_t> counters;
AggOp<agg_op::SUM> agg;
for (size_t i = 0; i < input_value.size(); ++i) {
auto l_key = input_key[i];
counters[l_key]++;
auto sch = key_val_map.find(l_key);
if (sch != key_val_map.end()) {
key_val_map[l_key] = agg(sch->second, input_value[i]);
} else {
key_val_map[l_key] = agg(input_value[i]);
}
}
for (auto& e : key_val_map) {
e.second = e.second/counters[e.first];
}
}
return key_val_map;
}
void compare_gdf_result(map_t& reference_map) {
ASSERT_EQ(output_value.size(), reference_map.size()) <<
"Size of gdf result does not match reference result\n";
ASSERT_EQ(output_key.size(), output_value.size()) <<
"Mismatch between aggregation and group by column size.";
for (size_t i = 0; i < output_value.size(); ++i) {
auto sch = reference_map.find(output_key[i]);
bool found = (sch != reference_map.end());
EXPECT_EQ(found, true);
if (!found) { continue; }
if (std::is_integral<output_t>::value) {
EXPECT_EQ(sch->second, output_value[i]);
} else {
EXPECT_NEAR(sch->second, output_value[i], sch->second/100.0);
}
//ensure no duplicates in gdf output
reference_map.erase(sch);
}
}
};
TEST_F(NVCategoryGroupByTest, TEST_NVCATEGORY_GROUPBY)
{
bool print = false;
const int rows_size = 64;
const agg_op op = agg_op::AVG;
const char ** string_data = cudf::test::generate_string_data(rows_size, length, print);
input_key = std::vector<std::string>(string_data, string_data + rows_size);
gdf_column * category_column = cudf::test::create_nv_category_column_strings(string_data, rows_size);
gdf_raw_input_key_columns.push_back(category_column);
int32_t* host_values = generate_int_data(rows_size, 10, print);
input_value = std::vector<int32_t>(host_values, host_values + rows_size);
gdf_raw_input_val_column = create_column_ints(host_values, rows_size);
gdf_column * gdf_raw_output_key_column = cudf::test::create_nv_category_column(rows_size, true);
gdf_raw_output_key_columns.push_back(gdf_raw_output_key_column);
gdf_raw_output_val_column = create_column_constant(rows_size, 1);
this->compute_gdf_result(op, GDF_SUCCESS, print);
auto reference_map = this->compute_reference_solution<op>();
this->compare_gdf_result(reference_map);
}
TEST_F(NVCategoryTest, TEST_NVCATEGORY_COMPARISON)
{
bool print = false;
const int rows_size = 64;
const size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
std::vector<std::string> left_host_column (left_string_data, left_string_data + rows_size);
std::vector<std::string> right_host_column (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
gdf_column * output_column = create_boolean_column(rows_size);
NVStrings * temp_string = static_cast<NVCategory *>(right_column->dtype_info.category)->to_strings();
NVCategory * new_category = static_cast<NVCategory *>(left_column->dtype_info.category)->add_strings(
*temp_string);
unsigned int * indices;
EXPECT_EQ(RMM_ALLOC(&indices, sizeof(unsigned int) * new_category->size(), 0), RMM_SUCCESS);
//now reset data
new_category->get_values( (int*)indices, true);
CUDA_TRY( hipMemcpy(left_column->data,indices,sizeof(unsigned int) * left_column->size,hipMemcpyDeviceToDevice) );
CUDA_TRY( hipMemcpy(right_column->data,indices + left_column->size,sizeof(unsigned int) * right_column->size,hipMemcpyDeviceToDevice) );
if(print){
print_gdf_column(left_column);
print_gdf_column(right_column);
}
left_column->dtype_info.category = new_category;
right_column->dtype_info.category = new_category;
CUDF_EXPECT_NO_THROW(cudf::binary_operation(output_column, left_column, right_column, gdf_binary_operator::GDF_EQUAL));
int8_t * data = new int8_t[rows_size];
CUDA_TRY( hipMemcpy(data, output_column->data, sizeof(int8_t) * rows_size, hipMemcpyDeviceToHost) );
for(size_t i = 0; i < rows_size; ++i){
EXPECT_EQ((bool)data[i], left_host_column[i] == right_host_column[i]);
}
delete[] data;
}
struct NVCategoryConcatTest : public GdfTest
{
std::vector<gdf_column *> concat_columns;
gdf_column * concat_out;
const int length = 2;
std::vector<std::string> compute_gdf_result(bool print = false){
size_t keys_size = 0;
for(size_t i=0;i<concat_columns.size();i++)
keys_size+=concat_columns[i]->size;
concat_out = cudf::test::create_nv_category_column(keys_size, true);
gdf_error err = gdf_column_concat(concat_out, concat_columns.data(), concat_columns.size());
EXPECT_EQ(GDF_SUCCESS, err);
if(print){
print_gdf_column(concat_out);
}
NVStrings * temp_strings = static_cast<NVCategory *>(concat_out->dtype_info.category)->gather_strings(
(nv_category_index_type *) concat_out->data, keys_size, DEVICE_ALLOCATED );
char** host_strings = new char*[keys_size];
for(size_t i=0;i<keys_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, keys_size);
for(size_t i=0;i<keys_size;i++){
host_strings[i][length]=0;
}
std::vector<std::string> strings_vector(host_strings, host_strings + keys_size);
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < keys_size; i++){
delete[] host_strings[i];
}
delete[] host_strings;
return strings_vector;
}
};
TEST_F(NVCategoryConcatTest, concat_test){
bool print = false;
const int rows_size = 64;
const char *** string_data = new const char**[2];
string_data[0] = cudf::test::generate_string_data(rows_size, length, print);
string_data[1] = cudf::test::generate_string_data(rows_size, length, print);
concat_columns.resize(2);
concat_columns[0] = cudf::test::create_nv_category_column_strings(string_data[0], rows_size);
concat_columns[1] = cudf::test::create_nv_category_column_strings(string_data[1], rows_size);
std::vector<std::string> reference_result;
reference_result.insert(reference_result.end(), string_data[0], string_data[0] + rows_size);
reference_result.insert(reference_result.end(), string_data[1], string_data[1] + rows_size);
if(print){
print_gdf_column(concat_columns[0]);
print_gdf_column(concat_columns[1]);
}
std::vector<std::string> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
// Selects the kind of join operation that is performed
enum struct join_op
{
INNER,
LEFT,
FULL
};
// Each element of the result will be an index into the left and right columns where
// left_columns[left_index] == right_columns[right_index]
using result_type = typename std::pair<int, int>;
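// e.g. left = {"a","b"}, right = {"b","c"}: an inner join yields {(1,0)};
// a left join additionally yields (0,-1) for the unmatched "a"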
// Define stream operator for a std::pair for convenience of printing results.
// Needs to be in the std namespace to work with std::copy
namespace std{
template <typename first_t, typename second_t>
std::ostream& operator<<(std::ostream& os, std::pair<first_t, second_t> const & p)
{
os << p.first << "\t" << p.second;
os << "\n";
return os;
}
}
struct NVCategoryJoinTest : public GdfTest
{
// Containers for the raw pointers to the gdf_columns that will be used as
// input to the gdf_join functions
std::vector<gdf_column*> gdf_raw_left_columns;
std::vector<gdf_column*> gdf_raw_right_columns;
std::vector<gdf_column*> gdf_raw_result_columns;
std::vector<std::string> left_string_column;
std::vector<std::string> right_string_column;
gdf_context ctxt{0, GDF_HASH, 0};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes a reference solution for joining the left and right sets of columns
*
* @Param print Option to print the solution for debug
* @Param sort Option to sort the solution. This is necessary for comparison against the gdf solution
*
* @Returns A vector of 'result_type' where result_type is a structure with a left_index, right_index
* where left_columns[left_index] == right_columns[right_index]
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_reference_solution(join_op op, bool print = false, bool sort = true)
{
using key_type = std::string;
using value_type = size_t;
// Multimap used to compute the reference solution
std::multimap<key_type, value_type> the_map;
// Build hash table that maps the first right columns' values to their row index in the column
std::vector<key_type> const & build_column = right_string_column;
for(size_t right_index = 0; right_index < build_column.size(); ++right_index){
the_map.insert(std::make_pair(build_column[right_index], right_index));
}
std::vector<result_type> reference_result;
// Probe hash table with first left column
std::vector<key_type> const & probe_column = left_string_column;
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
bool match{false};
// Find all keys that match probe_key
const auto probe_key = probe_column[left_index];
auto range = the_map.equal_range(probe_key);
// Every element in the returned range identifies a row in the first right column that
// matches the probe_key. Need to check if all other columns also match
for(auto i = range.first; i != range.second; ++i)
{
const auto right_index = i->second;
if(left_string_column[left_index] == right_string_column[right_index]){
reference_result.emplace_back(left_index, right_index);
match = true;
}
}
// For left and full joins, insert a NULL if no match is found
if((false == match) && ((op == join_op::LEFT) || (op == join_op::FULL))){
constexpr int JoinNullValue{-1};
reference_result.emplace_back(left_index, JoinNullValue);
}
}
if (op == join_op::FULL)
{
the_map.clear();
// Build hash table that maps the first left columns' values to their row index in the column
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index){
the_map.insert(std::make_pair(probe_column[left_index], left_index));
}
// Probe the hash table with first right column
// Add rows where a match for the right column does not exist
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
const auto probe_key = build_column[right_index];
auto search = the_map.find(probe_key);
if ((search == the_map.end()))
{
constexpr int JoinNullValue{-1};
reference_result.emplace_back(JoinNullValue, right_index);
}
}
}
// Sort the result
if(sort)
{
std::sort(reference_result.begin(), reference_result.end());
}
if(print)
{
std::cout << "\nReference result size: " << reference_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(reference_result.begin(), reference_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return reference_result;
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes the result of joining the left and right sets of columns with the libgdf functions
*
* @Param op The join operator
* @Param left_join_idx The vector of column indexes to join from left dataframe
* @Param right_join_idx The vector of column indexes to join from right dataframe
* @Param print Option to print the result computed by the libgdf function
* @Param sort Option to sort the result. This is required to compare the result against the reference solution
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_gdf_result(join_op op, std::vector<int> left_join_idx, std::vector<int> right_join_idx, bool print = false, bool sort = true, gdf_error expected_result = GDF_SUCCESS)
{
EXPECT_EQ(gdf_raw_left_columns.size(), gdf_raw_right_columns.size()) << "Mismatch columns size";
EXPECT_EQ(left_join_idx.size(), right_join_idx.size()) << "Mismatch join indexes size";
gdf_column left_result{};
gdf_column right_result{};
left_result.size = 0;
right_result.size = 0;
size_t num_columns = gdf_raw_left_columns.size();
size_t result_num_cols = gdf_raw_left_columns.size() + gdf_raw_right_columns.size() - left_join_idx.size();
gdf_error result_error{GDF_SUCCESS};
gdf_column ** left_gdf_columns = gdf_raw_left_columns.data();
gdf_column ** right_gdf_columns = gdf_raw_right_columns.data();
gdf_column ** result_columns = gdf_raw_result_columns.data();
switch(op)
{
case join_op::LEFT:
{
result_error = gdf_left_join(
left_gdf_columns, num_columns, left_join_idx.data(),
right_gdf_columns, num_columns, right_join_idx.data(),
left_join_idx.size(),
result_num_cols, result_columns,
&left_result, &right_result,
&ctxt);
break;
}
case join_op::INNER:
{
result_error = gdf_inner_join(
left_gdf_columns, num_columns, left_join_idx.data(),
right_gdf_columns, num_columns, right_join_idx.data(),
left_join_idx.size(),
result_num_cols, result_columns,
&left_result, &right_result,
&ctxt);
break;
}
case join_op::FULL:
{
result_error = gdf_full_join(
left_gdf_columns, num_columns, left_join_idx.data(),
right_gdf_columns, num_columns, right_join_idx.data(),
left_join_idx.size(),
result_num_cols, result_columns,
&left_result, &right_result,
&ctxt);
break;
}
default:
std::cout << "Invalid join method" << std::endl;
EXPECT_TRUE(false);
}
EXPECT_EQ(expected_result, result_error) << "The gdf join function did not complete successfully";
// If the expected result was not GDF_SUCCESS, then this test was testing for a
// specific error condition, in which case we return immediately and do not do
// any further work on the output
if(GDF_SUCCESS != expected_result){
return std::vector<result_type>();
}
EXPECT_EQ(left_result.size, right_result.size) << "Join output size mismatch";
// The output is an array of size `n` where the first n/2 elements are the
// left_indices and the last n/2 elements are the right indices
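// For example, with total_pairs == 3 the buffer reads
//   host_result = { l0, l1, l2, r0, r1, r2 }
// and output pair i is (host_result[i], host_result[i + total_pairs]).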
size_t total_pairs = left_result.size;
size_t output_size = total_pairs*2;
int * l_join_output = static_cast<int*>(left_result.data);
int * r_join_output = static_cast<int*>(right_result.data);
// Host vector to hold gdf join output
std::vector<int> host_result(output_size);
// Copy result of gdf join to the host
EXPECT_EQ(hipMemcpy(host_result.data(),
l_join_output, total_pairs * sizeof(int), hipMemcpyDeviceToHost), hipSuccess);
EXPECT_EQ(hipMemcpy(host_result.data() + total_pairs,
r_join_output, total_pairs * sizeof(int), hipMemcpyDeviceToHost), hipSuccess);
// Free the original join result
if(output_size > 0){
gdf_column_free(&left_result);
gdf_column_free(&right_result);
}
// Host vector of result_type pairs to hold final result for comparison to reference solution
std::vector<result_type> host_pair_result(total_pairs);
// Copy raw output into corresponding result_type pair
for(size_t i = 0; i < total_pairs; ++i){
host_pair_result[i].first = host_result[i];
host_pair_result[i].second = host_result[i + total_pairs];
}
// Sort the output for comparison to reference solution
if(sort){
std::sort(host_pair_result.begin(), host_pair_result.end());
}
if(print){
std::cout << "\nGDF result size: " << host_pair_result.size() << std::endl;
std::cout << "left index\tright index" << std::endl;
std::copy(host_pair_result.begin(), host_pair_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return host_pair_result;
}
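// Gathers the joined key column back to host strings and checks it against the
// strings reconstructed from the reference (left_index, right_index) pairs.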
void check_output(join_op op, std::vector<result_type>& reference_result, size_t length, bool print=false, bool sort=true){
gdf_column* result_column = gdf_raw_result_columns[0];
if(print){
std::cout<<"Raw string result:\n";
print_gdf_column(result_column);
}
size_t result_size = result_column->size;
if(result_size>0){
NVStrings * temp_strings = static_cast<NVCategory *>(result_column->dtype_info.category)->gather_strings(
(nv_category_index_type *) result_column->data, result_size , DEVICE_ALLOCATED );
char** host_strings = new char*[result_size];
for(size_t i=0;i<result_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, result_size);
for(size_t i=0;i<result_size;i++){
host_strings[i][length]=0;
}
std::vector<std::string> result_output = std::vector<std::string>(host_strings, host_strings + result_size);
std::vector<std::string> reference_output;
for(size_t i=0; i<result_size; i++){
if(reference_result[i].first != -1)
reference_output.push_back(left_string_column[reference_result[i].first]);
else
reference_output.push_back(right_string_column[reference_result[i].second]);
}
EXPECT_EQ(reference_output.size(), result_size);
if(sort){
std::sort(result_output.begin(), result_output.end());
std::sort(reference_output.begin(), reference_output.end());
}
if(print){
for(auto str : result_output){
std::cout<<str<<"\t";
}
std::cout<<std::endl;
}
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < result_size; i++){
delete[] host_strings[i];
}
delete[] host_strings;
for(size_t i=0; i<result_size; i++){
EXPECT_EQ(reference_output[i], result_output[i]);
}
}
}
};
TEST_F(NVCategoryJoinTest, join_test){
bool print = false;
size_t rows_size = 64;
join_op op = join_op::INNER;
size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
left_string_column = std::vector<std::string> (left_string_data, left_string_data + rows_size);
right_string_column = std::vector<std::string> (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
gdf_column * result_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_column);
gdf_raw_result_columns.push_back(result_column);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={0};
std::vector<int> right_join_idx={0};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
this->check_output(op, reference_result, length, print);
}
TEST_F(NVCategoryJoinTest, join_test_nulls){
bool print = false;
size_t rows_size = 16;
join_op op = join_op::INNER;
size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
left_string_column = std::vector<std::string> (left_string_data, left_string_data + rows_size);
right_string_column = std::vector<std::string> (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
left_column->valid = nullptr;
right_column->valid = nullptr;
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_column);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={0};
std::vector<int> right_join_idx={0};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TEST_F(NVCategoryJoinTest, join_test_bug){
bool print = false;
join_op op = join_op::LEFT;
const size_t left_size = 3;
const char *column_left_b[] = {"one ", "two ", "NO MATCH"};
int column_left_a[] = { 5, 14, 8 };
const size_t right_size = 2;
const char *column_right_b[] = {"two ", "one "};
int column_left_c[] = { 0, 1 };
left_string_column = std::vector<std::string> (column_left_b, column_left_b + left_size);
right_string_column = std::vector<std::string> (column_right_b, column_right_b + right_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(column_left_b, left_size);
left_column->valid = nullptr;
gdf_column * left_non_join_column = create_column_ints(column_left_a, left_size);
left_non_join_column ->valid = nullptr;
gdf_column * right_column = cudf::test::create_nv_category_column_strings(column_right_b, right_size);
right_column->valid = nullptr;
gdf_column * right_non_join_column = create_column_ints(column_left_c, right_size);
right_non_join_column->valid = nullptr;
left_column->valid = nullptr;
right_column->valid = nullptr;
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_non_join_column);
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_non_join_column);
gdf_raw_right_columns.push_back(right_column);
gdf_column * result_column_nonjoin_left = create_column_ints(column_left_a, left_size);
gdf_column * result_column_nonjoin_right = create_column_ints(column_left_a, left_size);
gdf_column * result_column_joined = cudf::test::create_nv_category_column_strings(column_left_b, left_size);
gdf_raw_result_columns.push_back(result_column_nonjoin_left);
gdf_raw_result_columns.push_back(result_column_joined);
gdf_raw_result_columns.push_back(result_column_nonjoin_right);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={1};
std::vector<int> right_join_idx={1};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
if(print){
std::cout<<"Output columns:\n";
for(size_t i=0; i<gdf_raw_result_columns.size(); i++){
print_gdf_column(gdf_raw_result_columns[i]);
std::cout<<"\n-----\n";
}
}
}
| b630f29cf73acddd8ebfb0dfac0b8194e607c2e8.cu | /*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Felipe Aramburu <[email protected]>
* Copyright 2018 Rommel Quintanilla <[email protected]>
* Copyright 2019 William Scott Malpica <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cudf/binaryop.hpp>
#include <cudf/cudf.h>
#include <utilities/cudf_utils.h>
#include <cudf/functions.h>
#include <cudf/types.h>
#include <iostream>
#include <random>
#include <nvstrings/NVCategory.h>
#include <nvstrings/NVStrings.h>
#include <rmm/rmm.h>
#include <cstring>
#include <tests/utilities/cudf_test_utils.cuh>
#include <tests/utilities/cudf_test_fixtures.h>
#include <tests/utilities/nvcategory_utils.cuh>
#include <bitmask/legacy/bit_mask.cuh>
// See this header for all of the handling of valids' vectors
#include <tests/utilities/valid_vectors.h>
#include <string/nvcategory_util.hpp>
gdf_column * create_column_ints(int32_t* host_data, gdf_size_type num_rows){
gdf_column * column = new gdf_column{};
int32_t * data;
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int32_t) , 0), RMM_SUCCESS);
CUDA_TRY( cudaMemcpy(data, host_data, sizeof(int32_t) * num_rows, cudaMemcpyHostToDevice) );
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *)valid,
num_rows,
GDF_INT32);
return column;
}
gdf_column * create_column_constant(gdf_size_type num_rows, int value){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int) , 0), RMM_SUCCESS);
cudaMemset(data,value,sizeof(int) * num_rows);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *) valid,
num_rows,
GDF_INT32);
return column;
}
int32_t* generate_int_data(gdf_size_type num_rows, size_t max_value, bool print=false){
int32_t* host_data = new int32_t[num_rows];
for(gdf_size_type row_index = 0; row_index < num_rows; row_index++){
host_data[row_index] = std::rand() % max_value;
if(print)
std::cout<<host_data[row_index]<<"\t";
}
if(print)
std::cout<<std::endl;
return host_data;
}
struct NVCategoryTest : public GdfTest
{
gdf_column * create_boolean_column(gdf_size_type num_rows){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int8_t) , 0), RMM_SUCCESS);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *) valid,
num_rows,
GDF_INT8);
return column;
}
gdf_column * create_indices_column(gdf_size_type num_rows){
gdf_column * column = new gdf_column{};
int * data;
bit_mask::bit_mask_t * valid;
bit_mask::create_bit_mask(&valid, num_rows,1);
EXPECT_EQ(RMM_ALLOC(&data, num_rows * sizeof(int) , 0), RMM_SUCCESS);
gdf_error err = gdf_column_view(column,
(void *) data,
(gdf_valid_type *) valid,
num_rows,
GDF_INT32);
return column;
}
};
// TODO: refactor tests
TEST_F(NVCategoryTest, TEST_NVCATEGORY_SORTING)
{
bool print = false;
const int rows_size = 64;
const int length = 2;
const char ** string_data = cudf::test::generate_string_data(rows_size, length, print);
gdf_column * column = cudf::test::create_nv_category_column_strings(string_data, rows_size);
gdf_column * output_column = create_indices_column(rows_size);
gdf_column ** input_columns = new gdf_column *[1];
input_columns[0] = column;
if(print){
print_gdf_column(input_columns[0]);
}
int8_t *asc_desc;
EXPECT_EQ(RMM_ALLOC(&asc_desc, 1, 0), RMM_SUCCESS);
int8_t minus_one = -1; //desc
cudaMemset(asc_desc, minus_one, 1);
gdf_context context;
context.flag_null_sort_behavior = GDF_NULL_AS_LARGEST;
// doesn't output nvcategory-type columns, so it works as is
gdf_error err = gdf_order_by(input_columns, asc_desc, 1, output_column, &context);
EXPECT_EQ(GDF_SUCCESS, err);
if(print){
print_gdf_column(output_column);
}
int* host_data = new int[rows_size];
CUDA_TRY( cudaMemcpy(
host_data,
output_column->data,
sizeof(int) * output_column->size,
cudaMemcpyDeviceToHost) );
std::vector<std::string> strings_vector(string_data, string_data + rows_size);
for(size_t i = 0; i < rows_size - 1; i++){
EXPECT_TRUE(strings_vector[host_data[i]] >= strings_vector[host_data[i+1]]);
}
}
// Selects the kind of join operation that is performed
enum struct agg_op
{
MIN,//0
MAX,//1
SUM,//2
CNT,//3
AVG //4
};
template <agg_op op>
struct AggOp {
template <typename T>
T operator()(const T a, const T b) {
return static_cast<T>(0);
}
template <typename T>
T operator()(const T a) {
return static_cast<T>(0);
}
};
template<>
struct AggOp<agg_op::MIN> {
template <typename T>
T operator()(const T a, const T b) {
return (a < b)? a : b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::MAX> {
template <typename T>
T operator()(const T a, const T b) {
return (a > b)? a : b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::SUM> {
template <typename T>
T operator()(const T a, const T b) {
return a + b;
}
template <typename T>
T operator()(const T a) {
return a;
}
};
template<>
struct AggOp<agg_op::CNT> {
size_t count{0};
template <typename T>
T operator()(const T a, const T b) {
count = a+1;
return count;
}
template <typename T>
T operator()(const T a) {
count = 1;
return count;
}
};
struct NVCategoryGroupByTest : public GdfTest
{
using output_t = int32_t;
using map_t = std::map<std::string, output_t>;
const int length = 1;
std::vector<std::string> input_key;
std::vector<output_t> input_value;
std::vector<std::string> output_key;
std::vector<output_t> output_value;
gdf_context ctxt = {0, GDF_HASH, 1};
// Containers for the raw pointers to the gdf_columns that will be used as input
// to the gdf_group_by functions
std::vector<gdf_column*> gdf_raw_input_key_columns;
gdf_column* gdf_raw_input_val_column;
std::vector<gdf_column*> gdf_raw_output_key_columns;
gdf_column* gdf_raw_output_val_column;
void copy_output(gdf_column* group_by_output_key, std::vector<std::string>& output_key,
gdf_column* group_by_output_value, std::vector<output_t>& output_value){
const size_t keys_size = group_by_output_key->size;
NVStrings * temp_strings = static_cast<NVCategory *>(group_by_output_key->dtype_info.category)->gather_strings(
(nv_category_index_type *) group_by_output_key->data, keys_size, DEVICE_ALLOCATED );
char** host_strings = new char*[keys_size];
for(size_t i=0;i<keys_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, keys_size);
for(size_t i=0;i<keys_size;i++){
host_strings[i][length]=0;
}
output_key = std::vector<std::string>(host_strings, host_strings + keys_size);
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < keys_size; i++){
delete[] host_strings[i];
}
delete[] host_strings;
output_value.resize(group_by_output_value->size);
CUDA_TRY( cudaMemcpy(output_value.data(), group_by_output_value->data, sizeof(output_t) * group_by_output_value->size, cudaMemcpyDeviceToHost) );
}
void compute_gdf_result(agg_op op, const gdf_error expected_error = GDF_SUCCESS, bool print=false)
{
const int num_columns = gdf_raw_input_key_columns.size();
gdf_error error{GDF_SUCCESS};
gdf_column **group_by_input_key = gdf_raw_input_key_columns.data();
gdf_column *group_by_input_value = gdf_raw_input_val_column;
gdf_column **group_by_output_key = gdf_raw_output_key_columns.data();
gdf_column *group_by_output_value = gdf_raw_output_val_column;
switch(op)
{
case agg_op::MIN:
{
error = gdf_group_by_min(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::MAX:
{
error = gdf_group_by_max(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::SUM:
{
error = gdf_group_by_sum(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::CNT:
{
error = gdf_group_by_count(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
case agg_op::AVG:
{
error = gdf_group_by_avg(num_columns,
group_by_input_key,
group_by_input_value,
nullptr,
group_by_output_key,
group_by_output_value,
&ctxt);
break;
}
default:
error = GDF_INVALID_AGGREGATOR;
}
EXPECT_EQ(expected_error, error) << "The gdf group by function did not complete successfully";
if (GDF_SUCCESS == expected_error ) {
copy_output(
group_by_output_key[0], output_key,
group_by_output_value, output_value);
if (print){
print_gdf_column(group_by_output_key[0]);
print_gdf_column(group_by_output_value);
}
}
}
template <agg_op op>
map_t compute_reference_solution() {
map_t key_val_map;
if (op != agg_op::AVG) {
AggOp<op> agg;
for (size_t i = 0; i < input_value.size(); ++i) {
auto l_key = input_key[i];
auto sch = key_val_map.find(l_key);
if (sch != key_val_map.end()) {
key_val_map[l_key] = agg(sch->second, input_value[i]);
} else {
key_val_map[l_key] = agg(input_value[i]);
}
}
} else {
std::map<std::string, size_t> counters;
AggOp<agg_op::SUM> agg;
for (size_t i = 0; i < input_value.size(); ++i) {
auto l_key = input_key[i];
counters[l_key]++;
auto sch = key_val_map.find(l_key);
if (sch != key_val_map.end()) {
key_val_map[l_key] = agg(sch->second, input_value[i]);
} else {
key_val_map[l_key] = agg(input_value[i]);
}
}
for (auto& e : key_val_map) {
e.second = e.second/counters[e.first];
}
}
return key_val_map;
}
void compare_gdf_result(map_t& reference_map) {
ASSERT_EQ(output_value.size(), reference_map.size()) <<
"Size of gdf result does not match reference result\n";
ASSERT_EQ(output_key.size(), output_value.size()) <<
"Mismatch between aggregation and group by column size.";
for (size_t i = 0; i < output_value.size(); ++i) {
auto sch = reference_map.find(output_key[i]);
bool found = (sch != reference_map.end());
EXPECT_EQ(found, true);
if (!found) { continue; }
if (std::is_integral<output_t>::value) {
EXPECT_EQ(sch->second, output_value[i]);
} else {
EXPECT_NEAR(sch->second, output_value[i], sch->second/100.0);
}
//ensure no duplicates in gdf output
reference_map.erase(sch);
}
}
};
TEST_F(NVCategoryGroupByTest, TEST_NVCATEGORY_GROUPBY)
{
bool print = false;
const int rows_size = 64;
const agg_op op = agg_op::AVG;
const char ** string_data = cudf::test::generate_string_data(rows_size, length, print);
input_key = std::vector<std::string>(string_data, string_data + rows_size);
gdf_column * category_column = cudf::test::create_nv_category_column_strings(string_data, rows_size);
gdf_raw_input_key_columns.push_back(category_column);
int32_t* host_values = generate_int_data(rows_size, 10, print);
input_value = std::vector<int32_t>(host_values, host_values + rows_size);
gdf_raw_input_val_column = create_column_ints(host_values, rows_size);
gdf_column * gdf_raw_output_key_column = cudf::test::create_nv_category_column(rows_size, true);
gdf_raw_output_key_columns.push_back(gdf_raw_output_key_column);
gdf_raw_output_val_column = create_column_constant(rows_size, 1);
this->compute_gdf_result(op, GDF_SUCCESS, print);
auto reference_map = this->compute_reference_solution<op>();
this->compare_gdf_result(reference_map);
}
TEST_F(NVCategoryTest, TEST_NVCATEGORY_COMPARISON)
{
bool print = false;
const int rows_size = 64;
const size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
std::vector<std::string> left_host_column (left_string_data, left_string_data + rows_size);
std::vector<std::string> right_host_column (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
gdf_column * output_column = create_boolean_column(rows_size);
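// Category codes from two different NVCategory dictionaries are not directly
// comparable, so merge the right column's strings into the left column's
// category and re-gather the unified codes for both columns below.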
NVStrings * temp_string = static_cast<NVCategory *>(right_column->dtype_info.category)->to_strings();
NVCategory * new_category = static_cast<NVCategory *>(left_column->dtype_info.category)->add_strings(
*temp_string);
NVStrings::destroy(temp_string); // the temporary NVStrings copy is no longer needed
unsigned int * indices;
EXPECT_EQ(RMM_ALLOC(&indices, sizeof(unsigned int) * new_category->size(), 0), RMM_SUCCESS);
//now reset data
new_category->get_values( (int*)indices, true);
CUDA_TRY( cudaMemcpy(left_column->data,indices,sizeof(unsigned int) * left_column->size,cudaMemcpyDeviceToDevice) );
CUDA_TRY( cudaMemcpy(right_column->data,indices + left_column->size,sizeof(unsigned int) * right_column->size,cudaMemcpyDeviceToDevice) );
if(print){
print_gdf_column(left_column);
print_gdf_column(right_column);
}
left_column->dtype_info.category = new_category;
right_column->dtype_info.category = new_category;
CUDF_EXPECT_NO_THROW(cudf::binary_operation(output_column, left_column, right_column, gdf_binary_operator::GDF_EQUAL));
int8_t * data = new int8_t[rows_size];
CUDA_TRY( cudaMemcpy(data, output_column->data, sizeof(int8_t) * rows_size, cudaMemcpyDeviceToHost) );
for(size_t i = 0; i < rows_size; ++i){
EXPECT_EQ((bool)data[i], left_host_column[i] == right_host_column[i]);
}
delete[] data;
}
struct NVCategoryConcatTest : public GdfTest
{
std::vector<gdf_column *> concat_columns;
gdf_column * concat_out;
const int length = 2;
std::vector<std::string> compute_gdf_result(bool print = false){
size_t keys_size = 0;
for(size_t i=0;i<concat_columns.size();i++)
keys_size+=concat_columns[i]->size;
concat_out = cudf::test::create_nv_category_column(keys_size, true);
gdf_error err = gdf_column_concat(concat_out, concat_columns.data(), concat_columns.size());
EXPECT_EQ(GDF_SUCCESS, err);
if(print){
print_gdf_column(concat_out);
}
NVStrings * temp_strings = static_cast<NVCategory *>(concat_out->dtype_info.category)->gather_strings(
(nv_category_index_type *) concat_out->data, keys_size, DEVICE_ALLOCATED );
char** host_strings = new char*[keys_size];
for(size_t i=0;i<keys_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, keys_size);
for(size_t i=0;i<keys_size;i++){
host_strings[i][length]=0;
}
std::vector<std::string> strings_vector(host_strings, host_strings + keys_size);
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < keys_size; i++){
delete[] host_strings[i];
}
delete[] host_strings;
return strings_vector;
}
};
TEST_F(NVCategoryConcatTest, concat_test){
bool print = false;
const int rows_size = 64;
const char *** string_data = new const char**[2];
string_data[0] = cudf::test::generate_string_data(rows_size, length, print);
string_data[1] = cudf::test::generate_string_data(rows_size, length, print);
concat_columns.resize(2);
concat_columns[0] = cudf::test::create_nv_category_column_strings(string_data[0], rows_size);
concat_columns[1] = cudf::test::create_nv_category_column_strings(string_data[1], rows_size);
std::vector<std::string> reference_result;
reference_result.insert(reference_result.end(), string_data[0], string_data[0] + rows_size);
reference_result.insert(reference_result.end(), string_data[1], string_data[1] + rows_size);
if(print){
print_gdf_column(concat_columns[0]);
print_gdf_column(concat_columns[1]);
}
std::vector<std::string> gdf_result = this->compute_gdf_result();
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
// Selects the kind of join operation that is performed
enum struct join_op
{
INNER,
LEFT,
FULL
};
// Each element of the result will be an index into the left and right columns where
// left_columns[left_index] == right_columns[right_index]
using result_type = typename std::pair<int, int>;
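// For example (hypothetical data): an INNER join of left = {"a", "b"} with
// right = {"b", "a", "a"} yields the pairs {(0,1), (0,2), (1,0)} --
// left "a" (index 0) matches right indices 1 and 2, and left "b" matches right index 0.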
// Define stream operator for a std::pair for convenience of printing results.
// Needs to be in the std namespace to work with std::copy
namespace std{
template <typename first_t, typename second_t>
std::ostream& operator<<(std::ostream& os, std::pair<first_t, second_t> const & p)
{
os << p.first << "\t" << p.second;
os << "\n";
return os;
}
}
struct NVCategoryJoinTest : public GdfTest
{
// Containers for the raw pointers to the gdf_columns that will be used as
// input to the gdf_join functions
std::vector<gdf_column*> gdf_raw_left_columns;
std::vector<gdf_column*> gdf_raw_right_columns;
std::vector<gdf_column*> gdf_raw_result_columns;
std::vector<std::string> left_string_column;
std::vector<std::string> right_string_column;
gdf_context ctxt{0, GDF_HASH, 0};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes a reference solution for joining the left and right sets of columns
*
* @Param print Option to print the solution for debug
* @Param sort Option to sort the solution. This is necessary for comparison against the gdf solution
*
* @Returns A vector of 'result_type' where result_type is a structure with a left_index, right_index
* where left_columns[left_index] == right_columns[right_index]
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_reference_solution(join_op op, bool print = false, bool sort = true)
{
using key_type = std::string;
using value_type = size_t;
// Multimap used to compute the reference solution
std::multimap<key_type, value_type> the_map;
// Build hash table that maps the first right columns' values to their row index in the column
std::vector<key_type> const & build_column = right_string_column;
for(size_t right_index = 0; right_index < build_column.size(); ++right_index){
the_map.insert(std::make_pair(build_column[right_index], right_index));
}
std::vector<result_type> reference_result;
// Probe hash table with first left column
std::vector<key_type> const & probe_column = left_string_column;
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index)
{
bool match{false};
// Find all keys that match probe_key
const auto probe_key = probe_column[left_index];
auto range = the_map.equal_range(probe_key);
// Every element in the returned range identifies a row in the first right column that
// matches the probe_key. Need to check if all other columns also match
for(auto i = range.first; i != range.second; ++i)
{
const auto right_index = i->second;
if(left_string_column[left_index] == right_string_column[right_index]){
reference_result.emplace_back(left_index, right_index);
match = true;
}
}
// For left joins, insert a NULL if no match is found
if((false == match) && ((op == join_op::LEFT) || (op == join_op::FULL))){
constexpr int JoinNullValue{-1};
reference_result.emplace_back(left_index, JoinNullValue);
}
}
if (op == join_op::FULL)
{
the_map.clear();
// Build hash table that maps the first left columns' values to their row index in the column
for(size_t left_index = 0; left_index < probe_column.size(); ++left_index){
the_map.insert(std::make_pair(probe_column[left_index], left_index));
}
// Probe the hash table with first right column
// Add rows where a match for the right column does not exist
for(size_t right_index = 0; right_index < build_column.size(); ++right_index)
{
const auto probe_key = build_column[right_index];
auto search = the_map.find(probe_key);
if ((search == the_map.end()))
{
constexpr int JoinNullValue{-1};
reference_result.emplace_back(JoinNullValue, right_index);
}
}
}
// Sort the result
if(sort)
{
std::sort(reference_result.begin(), reference_result.end());
}
if(print)
{
std::cout << "\nReference result size: " << reference_result.size() << std::endl;
std::cout << "left index, right index" << std::endl;
std::copy(reference_result.begin(), reference_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return reference_result;
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes the result of joining the left and right sets of columns with the libgdf functions
*
* @Param op The join operator
* @Param left_join_idx The vector of column indexes to join from left dataframe
* @Param right_join_idx The vector of column indexes to join from right dataframe
* @Param print Option to print the result computed by the libgdf function
* @Param sort Option to sort the result. This is required to compare the result against the reference solution
*/
/* ----------------------------------------------------------------------------*/
std::vector<result_type> compute_gdf_result(join_op op, std::vector<int> left_join_idx, std::vector<int> right_join_idx, bool print = false, bool sort = true, gdf_error expected_result = GDF_SUCCESS)
{
EXPECT_EQ(gdf_raw_left_columns.size(), gdf_raw_right_columns.size()) << "Mismatch columns size";
EXPECT_EQ(left_join_idx.size(), right_join_idx.size()) << "Mismatch join indexes size";
gdf_column left_result{};
gdf_column right_result{};
left_result.size = 0;
right_result.size = 0;
size_t num_columns = gdf_raw_left_columns.size();
size_t result_num_cols = gdf_raw_left_columns.size() + gdf_raw_right_columns.size() - left_join_idx.size();
gdf_error result_error{GDF_SUCCESS};
gdf_column ** left_gdf_columns = gdf_raw_left_columns.data();
gdf_column ** right_gdf_columns = gdf_raw_right_columns.data();
gdf_column ** result_columns = gdf_raw_result_columns.data();
switch(op)
{
case join_op::LEFT:
{
result_error = gdf_left_join(
left_gdf_columns, num_columns, left_join_idx.data(),
right_gdf_columns, num_columns, right_join_idx.data(),
left_join_idx.size(),
result_num_cols, result_columns,
&left_result, &right_result,
&ctxt);
break;
}
case join_op::INNER:
{
result_error = gdf_inner_join(
left_gdf_columns, num_columns, left_join_idx.data(),
right_gdf_columns, num_columns, right_join_idx.data(),
left_join_idx.size(),
result_num_cols, result_columns,
&left_result, &right_result,
&ctxt);
break;
}
case join_op::FULL:
{
result_error = gdf_full_join(
left_gdf_columns, num_columns, left_join_idx.data(),
right_gdf_columns, num_columns, right_join_idx.data(),
left_join_idx.size(),
result_num_cols, result_columns,
&left_result, &right_result,
&ctxt);
break;
}
default:
std::cout << "Invalid join method" << std::endl;
EXPECT_TRUE(false);
}
EXPECT_EQ(expected_result, result_error) << "The gdf join function did not complete successfully";
// If the expected result was not GDF_SUCCESS, then this test was testing for a
// specific error condition, in which case we return immediately and do not do
// any further work on the output
if(GDF_SUCCESS != expected_result){
return std::vector<result_type>();
}
EXPECT_EQ(left_result.size, right_result.size) << "Join output size mismatch";
// The output is an array of size `n` where the first n/2 elements are the
// left_indices and the last n/2 elements are the right indices
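// For example, with total_pairs == 3 the buffer reads
//   host_result = { l0, l1, l2, r0, r1, r2 }
// and output pair i is (host_result[i], host_result[i + total_pairs]).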
size_t total_pairs = left_result.size;
size_t output_size = total_pairs*2;
int * l_join_output = static_cast<int*>(left_result.data);
int * r_join_output = static_cast<int*>(right_result.data);
// Host vector to hold gdf join output
std::vector<int> host_result(output_size);
// Copy result of gdf join to the host
EXPECT_EQ(cudaMemcpy(host_result.data(),
l_join_output, total_pairs * sizeof(int), cudaMemcpyDeviceToHost), cudaSuccess);
EXPECT_EQ(cudaMemcpy(host_result.data() + total_pairs,
r_join_output, total_pairs * sizeof(int), cudaMemcpyDeviceToHost), cudaSuccess);
// Free the original join result
if(output_size > 0){
gdf_column_free(&left_result);
gdf_column_free(&right_result);
}
// Host vector of result_type pairs to hold final result for comparison to reference solution
std::vector<result_type> host_pair_result(total_pairs);
// Copy raw output into corresponding result_type pair
for(size_t i = 0; i < total_pairs; ++i){
host_pair_result[i].first = host_result[i];
host_pair_result[i].second = host_result[i + total_pairs];
}
// Sort the output for comparison to reference solution
if(sort){
std::sort(host_pair_result.begin(), host_pair_result.end());
}
if(print){
std::cout << "\nGDF result size: " << host_pair_result.size() << std::endl;
std::cout << "left index\tright index" << std::endl;
std::copy(host_pair_result.begin(), host_pair_result.end(), std::ostream_iterator<result_type>(std::cout, ""));
std::cout << "\n";
}
return host_pair_result;
}
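// Gathers the joined key column back to host strings and checks it against the
// strings reconstructed from the reference (left_index, right_index) pairs.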
void check_output(join_op op, std::vector<result_type>& reference_result, size_t length, bool print=false, bool sort=true){
gdf_column* result_column = gdf_raw_result_columns[0];
if(print){
std::cout<<"Raw string result:\n";
print_gdf_column(result_column);
}
size_t result_size = result_column->size;
if(result_size>0){
NVStrings * temp_strings = static_cast<NVCategory *>(result_column->dtype_info.category)->gather_strings(
(nv_category_index_type *) result_column->data, result_size , DEVICE_ALLOCATED );
char** host_strings = new char*[result_size];
for(size_t i=0;i<result_size;i++){
host_strings[i]=new char[length+1];
}
temp_strings->to_host(host_strings, 0, result_size);
for(size_t i=0;i<result_size;i++){
host_strings[i][length]=0;
}
std::vector<std::string> result_output = std::vector<std::string>(host_strings, host_strings + result_size);
std::vector<std::string> reference_output;
for(size_t i=0; i<result_size; i++){
if(reference_result[i].first != -1)
reference_output.push_back(left_string_column[reference_result[i].first]);
else
reference_output.push_back(right_string_column[reference_result[i].second]);
}
EXPECT_EQ(reference_output.size(), result_size);
if(sort){
std::sort(result_output.begin(), result_output.end());
std::sort(reference_output.begin(), reference_output.end());
}
if(print){
for(auto str : result_output){
std::cout<<str<<"\t";
}
std::cout<<std::endl;
}
NVStrings::destroy(temp_strings);
for(size_t i = 0; i < result_size; i++){
delete[] host_strings[i];
}
delete[] host_strings;
for(size_t i=0; i<result_size; i++){
EXPECT_EQ(reference_output[i], result_output[i]);
}
}
}
};
TEST_F(NVCategoryJoinTest, join_test){
bool print = false;
size_t rows_size = 64;
join_op op = join_op::INNER;
size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
left_string_column = std::vector<std::string> (left_string_data, left_string_data + rows_size);
right_string_column = std::vector<std::string> (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
gdf_column * result_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_column);
gdf_raw_result_columns.push_back(result_column);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={0};
std::vector<int> right_join_idx={0};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
this->check_output(op, reference_result, length, print);
}
TEST_F(NVCategoryJoinTest, join_test_nulls){
bool print = false;
size_t rows_size = 16;
join_op op = join_op::INNER;
size_t length = 1;
const char ** left_string_data = cudf::test::generate_string_data(rows_size, length, print);
const char ** right_string_data = cudf::test::generate_string_data(rows_size, length, print);
left_string_column = std::vector<std::string> (left_string_data, left_string_data + rows_size);
right_string_column = std::vector<std::string> (right_string_data, right_string_data + rows_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(left_string_data, rows_size);
gdf_column * right_column = cudf::test::create_nv_category_column_strings(right_string_data, rows_size);
left_column->valid = nullptr;
right_column->valid = nullptr;
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_column);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={0};
std::vector<int> right_join_idx={0};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
}
TEST_F(NVCategoryJoinTest, join_test_bug){
bool print = false;
join_op op = join_op::LEFT;
const size_t left_size = 3;
const char *column_left_b[] = {"one ", "two ", "NO MATCH"};
int column_left_a[] = { 5, 14, 8 };
const size_t right_size = 2;
const char *column_right_b[] = {"two ", "one "};
int column_left_c[] = { 0, 1 };
left_string_column = std::vector<std::string> (column_left_b, column_left_b + left_size);
right_string_column = std::vector<std::string> (column_right_b, column_right_b + right_size);
gdf_column * left_column = cudf::test::create_nv_category_column_strings(column_left_b, left_size);
left_column->valid = nullptr;
gdf_column * left_non_join_column = create_column_ints(column_left_a, left_size);
left_non_join_column ->valid = nullptr;
gdf_column * right_column = cudf::test::create_nv_category_column_strings(column_right_b, right_size);
right_column->valid = nullptr;
gdf_column * right_non_join_column = create_column_ints(column_left_c, right_size);
right_non_join_column->valid = nullptr;
left_column->valid = nullptr;
right_column->valid = nullptr;
if(print){
std::cout<<"Raw string indexes:\n";
print_gdf_column(left_column);
print_gdf_column(right_column);
}
gdf_raw_left_columns.push_back(left_non_join_column);
gdf_raw_left_columns.push_back(left_column);
gdf_raw_right_columns.push_back(right_non_join_column);
gdf_raw_right_columns.push_back(right_column);
gdf_column * result_column_nonjoin_left = create_column_ints(column_left_a, left_size);
gdf_column * result_column_nonjoin_right = create_column_ints(column_left_a, left_size);
gdf_column * result_column_joined = cudf::test::create_nv_category_column_strings(column_left_b, left_size);
gdf_raw_result_columns.push_back(result_column_nonjoin_left);
gdf_raw_result_columns.push_back(result_column_joined);
gdf_raw_result_columns.push_back(result_column_nonjoin_right);
std::vector<result_type> reference_result = this->compute_reference_solution(op, print);
std::vector<int> left_join_idx={1};
std::vector<int> right_join_idx={1};
std::vector<result_type> gdf_result = this->compute_gdf_result(op, left_join_idx, right_join_idx, print);
ASSERT_EQ(reference_result.size(), gdf_result.size()) << "Size of gdf result does not match reference result\n";
// Compare the GDF and reference solutions
for(size_t i = 0; i < reference_result.size(); ++i){
EXPECT_EQ(reference_result[i], gdf_result[i]);
}
if(print){
std::cout<<"Output columns:\n";
for(size_t i=0; i<gdf_raw_result_columns.size(); i++){
print_gdf_column(gdf_raw_result_columns[i]);
std::cout<<"\n-----\n";
}
}
}
|
74050b22a361f6e8a96e43e152478f1ba71ed085.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorMathCompareT.cuh>
#include <THH/THHTensor.hpp>
#include <THH/generic/THHTensorMathCompareT.hip>
#include <THH/THHGenerateByteType.h>
| 74050b22a361f6e8a96e43e152478f1ba71ed085.cu | #include <THC/THCTensorMathCompareT.cuh>
#include <THC/THCTensor.hpp>
#include <THC/generic/THCTensorMathCompareT.cu>
#include <THC/THCGenerateByteType.h>
|
51f1274fdaa596427001a9094f44d399e1f44f53.hip | // !!! This is a file automatically generated by hipify!!!
/*-----------------------------------------------------------
** gaussian.cu -- This program solves a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
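/*-----------------------------------------------------------
** A sketch of the per-pivot update the Fan kernels below implement
** (0-based indices, assuming a nonzero pivot a[t][t]):
**   Fan1: m[i][t]  = a[i][t] / a[t][t]    for i = t+1 .. Size-1
**   Fan2: a[i][j] -= m[i][t] * a[t][j]    for j = t .. Size-1
**         b[i]    -= m[i][t] * b[t]
**-----------------------------------------------------------
*/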
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "hip/hip_runtime.h"
#include <string.h>
#include <math.h>
#define MAXBLOCKSIZE 512
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
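// The generated matrix is symmetric with entries m[i][j] = 10*exp(lamda*|i-j|),
// so magnitudes decay away from the diagonal (lamda = -0.01).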
void
create_matrix(float *m, int size){
int i,j;
float lamda = -0.01;
float coe[2*size-1];
float coe_i =0.0;
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
}
int main(int argc, char *argv[])
{
int verbose = 1;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.\n");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.\n");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
//printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
//printf("Matrix m is: \n");
PrintMat(m, Size, Size);
//printf("Matrix a is: \n");
PrintMat(a, Size, Size);
//printf("Array b is: \n");
PrintAry(b, Size);
}
BackSub();
if (verbose) {
//printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
fprintf(stderr, "Time total (including memory transfers)\t%f sec\n", time_total * 1e-6);
//printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
void PrintDeviceProperties(){
hipDeviceProp_t deviceProp;
int nDevCount = 0;
hipGetDeviceCount( &nDevCount );
printf( "Total devices found: %d\n", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
memset( &deviceProp, 0, sizeof(deviceProp));
if( hipSuccess == hipGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp.deviceOverlap ? "Allowed" : "Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
printf( "\n%s", hipGetErrorString(hipGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
//char *filename = argv[1];
//printf("Enter the data file name: ");
//scanf("%s", filename);
//printf("The file name is: %s\n", filename);
fp = fopen(filename, "r");
fscanf(fp, "%d", &Size);
a = (float *) malloc(Size * Size * sizeof(float));
InitMat(a, Size, Size);
//printf("The input matrix a is:\n");
//PrintMat(a, Size, Size);
b = (float *) malloc(Size * sizeof(float));
InitAry(b, Size);
//printf("The input array b is:\n");
//PrintAry(b, Size);
m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multiplier matrix **m
**------------------------------------------------------
*/
void InitPerRun()
{
int i;
for (i=0; i<Size*Size; i++)
*(m+i) = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i gives a range
** that starts from 0 and runs to range-1. The real values of
** the index must be adjusted relative to the value
** of t, which is defined in ForwardSub().
**-------------------------------------------------------
*/
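/* A sketch of the per-thread work for pivot t: each thread handles
**   row = blockDim.x*blockIdx.x + threadIdx.x + t + 1
** and computes one multiplier, m[row][t] = a[row][t] / a[t][t].
*/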
__global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
float dmnt = *(a_cuda+Size*t+t);
if(threadIdx.x + blockIdx.x * blockDim.x < Size-1-t)
*(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / dmnt;
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
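/* A sketch of the per-thread work: thread (xidx, yidx) eliminates one element,
**   a[xidx+1+t][yidx+t] -= m[xidx+1+t][t] * a[t][yidx+t],
** and the yidx == 0 threads also update the right-hand side b.
*/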
__global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x < Size-1-t) {
if(threadIdx.y + blockIdx.y * blockDim.y < Size-t) {
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
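/* Fan1 and Fan2 are launched once per pivot t = 0 .. Size-2; each pair must
** complete before the next pivot begins, hence the device synchronization
** after every launch.
*/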
void ForwardSub()
{
int t;
float *m_cuda,*a_cuda,*b_cuda;
// allocate memory on GPU
hipMalloc((void **) &m_cuda, Size * Size * sizeof(float));
hipMalloc((void **) &a_cuda, Size * Size * sizeof(float));
hipMalloc((void **) &b_cuda, Size * sizeof(float));
// copy memory to GPU
hipMemcpy(m_cuda, m, Size * Size * sizeof(float),hipMemcpyHostToDevice );
hipMemcpy(a_cuda, a, Size * Size * sizeof(float),hipMemcpyHostToDevice );
hipMemcpy(b_cuda, b, Size * sizeof(float),hipMemcpyHostToDevice );
int block_size,grid_size;
block_size = MAXBLOCKSIZE;
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
//printf("1d grid size: %d\n",grid_size);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
//dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
int blockSize2d, gridSize2d;
blockSize2d = 4;
gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d)? 0:1);
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
// begin timing kernels
struct timeval time_start;
gettimeofday(&time_start, NULL);
for (t=0; t<Size-1; t++) {
hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, m_cuda,a_cuda,Size,t);
hipDeviceSynchronize();
hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, m_cuda,a_cuda,b_cuda,Size,Size-t,t);
hipDeviceSynchronize();
checkCUDAError("Fan2");
}
// end timing kernels
struct timeval time_end;
gettimeofday(&time_end, NULL);
totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
// copy memory back to CPU
hipMemcpy(m, m_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost );
hipMemcpy(a, a_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost );
hipMemcpy(b, b_cuda, Size * sizeof(float),hipMemcpyDeviceToHost );
hipFree(m_cuda);
hipFree(a_cuda);
hipFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
void BackSub()
{
// create a new vector to hold the final answer
finalVec = (float *) malloc(Size * sizeof(float));
// solve "bottom up"
int i,j;
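	// x[n-1-i] = (b[n-1-i] - sum_{j<i} a[n-1-i][n-1-j] * x[n-1-j]) / a[n-1-i][n-1-i]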
for(i=0;i<Size;i++){
finalVec[Size-i-1]=b[Size-i-1];
for(j=0;j<i;j++)
{
finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1];
}
finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1));
}
}
void InitMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
fscanf(fp, "%f", ary+Size*i+j);
}
}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
int i, j;
float e = 1e-6;
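	// magnitudes below e are snapped to 0.00 before printing; note this mutates the matrix in place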
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
if (*(ary+Size*i+j) < e && *(ary+Size*i+j) > -e)
*(ary+Size*i+j) = 0.00;
printf("%f ", *(ary+Size*i+j));
}
printf("\n");
}
printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
fscanf(fp, "%f", &ary[i]);
}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
int i;
float e = 1e-6;
for (i=0; i<ary_size; i++) {
if (ary[i] < e && ary[i] > -e)
ary[i] = 0.00;
printf("%f ", ary[i]);
}
printf("\n\n");
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
| 51f1274fdaa596427001a9094f44d399e1f44f53.cu | /*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda.h"
#include <string.h>
#include <math.h>
#define MAXBLOCKSIZE 512
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
void
create_matrix(float *m, int size){
int i,j;
float lamda = -0.01;
float coe[2*size-1];
float coe_i =0.0;
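	// coe holds the band values; the result is a symmetric Toeplitz matrix with entries 10*exp(lamda*|i-j|)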
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
}
int main(int argc, char *argv[])
{
int verbose = 1;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
//printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
//printf("Matrix m is: \n");
PrintMat(m, Size, Size);
//printf("Matrix a is: \n");
PrintMat(a, Size, Size);
//printf("Array b is: \n");
PrintAry(b, Size);
}
BackSub();
if (verbose) {
//printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
fprintf(stderr, "Time total (including memory transfers)\t%f sec\n", time_total * 1e-6);
//printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
void PrintDeviceProperties(){
cudaDeviceProp deviceProp;
int nDevCount = 0;
cudaGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
memset( &deviceProp, 0, sizeof(deviceProp));
if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
printf( "\n%s", cudaGetErrorString(cudaGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
//char *filename = argv[1];
//printf("Enter the data file name: ");
//scanf("%s", filename);
//printf("The file name is: %s\n", filename);
fp = fopen(filename, "r");
fscanf(fp, "%d", &Size);
a = (float *) malloc(Size * Size * sizeof(float));
InitMat(a, Size, Size);
//printf("The input matrix a is:\n");
//PrintMat(a, Size, Size);
b = (float *) malloc(Size * sizeof(float));
InitAry(b, Size);
//printf("The input array b is:\n");
//PrintAry(b, Size);
m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multipier matrix **m
**------------------------------------------------------
*/
void InitPerRun()
{
int i;
for (i=0; i<Size*Size; i++)
*(m+i) = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the indexing: index i ranges from 0 to
** range-1, and the actual matrix indices must be adjusted
** by the value of t, which is defined in ForwardSub().
**-------------------------------------------------------
*/
__global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
float dmnt = *(a_cuda+Size*t+t);
if(threadIdx.x + blockIdx.x * blockDim.x < Size-1-t)
*(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / dmnt;
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
__global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x < Size-1-t) {
if(threadIdx.y + blockIdx.y * blockDim.y < Size-t) {
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
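			// only the yidx == 0 thread of each row updates b, so each entry of b is written exactly once (here yidx+t == t)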
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
void ForwardSub()
{
int t;
float *m_cuda,*a_cuda,*b_cuda;
// allocate memory on GPU
cudaMalloc((void **) &m_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &a_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &b_cuda, Size * sizeof(float));
// copy memory to GPU
cudaMemcpy(m_cuda, m, Size * Size * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy(a_cuda, a, Size * Size * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy(b_cuda, b, Size * sizeof(float),cudaMemcpyHostToDevice );
int block_size,grid_size;
block_size = MAXBLOCKSIZE;
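	// round up so the grid covers all Size rows when Size is not a multiple of block_size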
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
//printf("1d grid size: %d\n",grid_size);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
//dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
int blockSize2d, gridSize2d;
blockSize2d = 4;
	gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d)? 0:1);
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
// begin timing kernels
struct timeval time_start;
gettimeofday(&time_start, NULL);
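	// for each pivot column t: Fan1 fills the multiplier column m[.][t],
	// then Fan2 eliminates column t from the trailing rows of A and updates b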
for (t=0; t<Size-1; t++) {
Fan1<<<dimGrid,dimBlock>>>(m_cuda,a_cuda,Size,t);
cudaThreadSynchronize();
Fan2<<<dimGridXY,dimBlockXY>>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t);
cudaThreadSynchronize();
checkCUDAError("Fan2");
}
// end timing kernels
struct timeval time_end;
gettimeofday(&time_end, NULL);
totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
// copy memory back to CPU
cudaMemcpy(m, m_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaMemcpy(a, a_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaMemcpy(b, b_cuda, Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaFree(m_cuda);
cudaFree(a_cuda);
cudaFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
void BackSub()
{
// create a new vector to hold the final answer
finalVec = (float *) malloc(Size * sizeof(float));
// solve "bottom up"
int i,j;
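	// x[n-1-i] = (b[n-1-i] - sum_{j<i} a[n-1-i][n-1-j] * x[n-1-j]) / a[n-1-i][n-1-i]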
for(i=0;i<Size;i++){
finalVec[Size-i-1]=b[Size-i-1];
for(j=0;j<i;j++)
{
finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1];
}
finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1));
}
}
void InitMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
fscanf(fp, "%f", ary+Size*i+j);
}
}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
int i, j;
float e = 1e-6;
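	// magnitudes below e are snapped to 0.00 before printing; note this mutates the matrix in place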
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
if (*(ary+Size*i+j) < e && *(ary+Size*i+j) > -e)
*(ary+Size*i+j) = 0.00;
printf("%f ", *(ary+Size*i+j));
}
printf("\n");
}
printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
fscanf(fp, "%f", &ary[i]);
}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
int i;
float e = 1e-6;
for (i=0; i<ary_size; i++) {
if (ary[i] < e && ary[i] > -e)
ary[i] = 0.00;
printf("%f ", ary[i]);
}
printf("\n\n");
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
f4d34887ecea7dcb2a6b50193d1aa87699e56009.hip | // !!! This is a file automatically generated by hipify!!!
#include <cupy/type_dispatcher.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
// This is used to avoid a problem with constexpr in functions declarations introduced in
// cuda 11.2, MSVC 15 does not fully support it so we need a dummy constexpr declaration
// that is provided by this header. However optional.h is only available
// starting CUDA 10.1
#include <thrust/optional.h>
#ifdef _MSC_VER
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS constexpr
#else
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
#endif
#include "cupy_thrust.h"
#if CUPY_USE_HIP
namespace cuda {
    using thrust::hip::par;
}
#else // #if CUPY_USE_HIP
// the non-HIP branch still compiles the hipStream_t spellings used below,
// so alias it to cudaStream_t and use the CUDA Thrust execution policy
typedef cudaStream_t hipStream_t;
namespace cuda {
    using thrust::cuda::par;
}
#endif // #if CUPY_USE_HIP
extern "C" char *cupy_malloc(void *, size_t);
extern "C" void cupy_free(void *, char *);
class cupy_allocator {
private:
void* memory;
public:
typedef char value_type;
cupy_allocator(void* memory) : memory(memory) {}
char *allocate(size_t num_bytes) {
return cupy_malloc(memory, num_bytes);
}
void deallocate(char *ptr, size_t n) {
cupy_free(memory, ptr);
}
};
/*
* ------------------------------------- Minimum boilerplate for NumPy compatibility --------------------------------------
* We need a specialized operator< here in order to match the NumPy behavior:
* "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
* determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
*
* In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
* where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
* it exists. Non-nan values are sorted as before."
* Ref: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _tuple_less(const thrust::tuple<size_t, T>& lhs,
const thrust::tuple<size_t, T>& rhs) {
const size_t& lhs_k = lhs.template get<0>();
const size_t& rhs_k = rhs.template get<0>();
const T& lhs_v = lhs.template get<1>();
const T& rhs_v = rhs.template get<1>();
const thrust::less<T> _less;
// tuple's comparison rule: compare the 1st member, then 2nd, then 3rd, ...,
// which should be respected
if (lhs_k < rhs_k) {
return true;
} else if (lhs_k == rhs_k) {
// same key, compare values
// note that we can't rely on native operator< due to NaN, so we rely on
// thrust::less() to be specialized shortly
return _less(lhs_v, rhs_v);
} else {
return false;
}
}
/*
* ********** complex numbers **********
* We need to specialize thrust::less because obviously we can't overload operator< for complex numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _cmp_less(const T& lhs, const T& rhs) {
bool lhsRe = isnan(lhs.real());
bool lhsIm = isnan(lhs.imag());
bool rhsRe = isnan(rhs.real());
bool rhsIm = isnan(rhs.imag());
// neither side has nan
if (!lhsRe && !lhsIm && !rhsRe && !rhsIm) {
return lhs < rhs;
}
// one side has nan, and the other does not
if (!lhsRe && !lhsIm && (rhsRe || rhsIm)) {
return true;
}
if ((lhsRe || lhsIm) && !rhsRe && !rhsIm) {
return false;
}
// pick 2 from 3 possibilities (R + nanj, nan + Rj, nan + nanj)
if (lhsRe && !rhsRe) {
return false;
}
if (!lhsRe && rhsRe) {
return true;
}
if (lhsIm && !rhsIm) {
return false;
}
if (!lhsIm && rhsIm) {
return true;
}
// pick 1 from 3 and compare the numerical values (nan+nan*I compares to itself as false)
return (((lhsIm && rhsIm) && (lhs.real() < rhs.real())) || ((lhsRe && rhsRe) && (lhs.imag() < rhs.imag())));
}
// specialize thrust::less for single complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<complex<float>>::operator() (
const complex<float>& lhs, const complex<float>& rhs) const {
return _cmp_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for double complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<complex<double>>::operator() (
const complex<double>& lhs, const complex<double>& rhs) const {
return _cmp_less<complex<double>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<float>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, complex<float>> >::operator() (
const thrust::tuple<size_t, complex<float>>& lhs, const thrust::tuple<size_t, complex<float>>& rhs) const {
return _tuple_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<double>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, complex<double>> >::operator() (
const thrust::tuple<size_t, complex<double>>& lhs, const thrust::tuple<size_t, complex<double>>& rhs) const {
return _tuple_less<complex<double>>(lhs, rhs);
}
/*
* ********** real numbers (templates) **********
* We need to specialize thrust::less because obviously we can't overload operator< for floating point numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _real_less(const T& lhs, const T& rhs) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
if (isnan(lhs)) {
return false;
} else if (isnan(rhs)) {
return true;
} else {
return lhs < rhs;
}
#else
    return false;  // This will never be executed on the host
#endif
}
/*
* ********** real numbers (specializations for single & double precisions) **********
*/
// specialize thrust::less for float
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<float>::operator() (
const float& lhs, const float& rhs) const {
return _real_less<float>(lhs, rhs);
}
// specialize thrust::less for double
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<double>::operator() (
const double& lhs, const double& rhs) const {
return _real_less<double>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, float>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, float> >::operator() (
const thrust::tuple<size_t, float>& lhs, const thrust::tuple<size_t, float>& rhs) const {
return _tuple_less<float>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, double>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, double> >::operator() (
const thrust::tuple<size_t, double>& lhs, const thrust::tuple<size_t, double>& rhs) const {
return _tuple_less<double>(lhs, rhs);
}
/*
* ********** real numbers (specializations for half precision) **********
*/
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// it seems Thrust doesn't care about the code path on the host, so we just need a wrapper for the device
__host__ __device__ __forceinline__ bool isnan(const __half& x) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
return __hisnan(x);
#else
return false; // This will never be called on the host
#endif
}
// specialize thrust::less for __half
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<__half>::operator() (const __half& lhs, const __half& rhs) const {
return _real_less<__half>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, __half>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, __half> >::operator() (
const thrust::tuple<size_t, __half>& lhs, const thrust::tuple<size_t, __half>& rhs) const {
return _tuple_less<__half>(lhs, rhs);
}
#endif // include cupy_fp16.h
/*
* -------------------------------------------------- end of boilerplate --------------------------------------------------
*/
/*
* sort
*/
struct _sort {
template <typename T>
__forceinline__ void operator()(void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream,
void* memory) {
size_t ndim = shape.size();
ptrdiff_t size;
thrust::device_ptr<T> dp_data_first, dp_data_last;
thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
// Compute the total size of the array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);
if (ndim == 1) {
stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, thrust::less<T>());
} else {
// Generate key indices.
dp_keys_first = thrust::device_pointer_cast(keys_start);
dp_keys_last = thrust::device_pointer_cast(keys_start + size);
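            // key[i] = i / row_length: elements of the same row share a key,
            // so sorting the (key, value) zip below sorts each row independently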
transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
rocprim::make_counting_iterator<size_t>(0),
rocprim::make_counting_iterator<size_t>(size),
rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
dp_keys_first,
thrust::divides<size_t>());
stable_sort(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
thrust::less< thrust::tuple<size_t, T> >());
}
}
};
/*
* lexsort
*/
template <typename T>
class elem_less {
public:
elem_less(const T *data):_data(data) {}
__device__ __forceinline__ bool operator()(size_t i, size_t j) const {
return thrust::less<T>()(_data[i], _data[j]);
}
private:
const T *_data;
};
struct _lexsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
thrust::device_ptr<size_t> dp_first = thrust::device_pointer_cast(idx_start);
thrust::device_ptr<size_t> dp_last = thrust::device_pointer_cast(idx_start + n);
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
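        // keys are applied in order 0..k-1 with a stable sort, so the last
        // key processed (i == k-1) becomes the primary sort key, matching
        // NumPy's lexsort convention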
for (size_t i = 0; i < k; ++i) {
T *key_start = static_cast<T*>(keys_start) + i * n;
stable_sort(
cuda::par(alloc).on(stream_),
dp_first,
dp_last,
elem_less<T>(key_start)
);
}
}
};
/*
* argsort
*/
struct _argsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *data_start,
void *keys_start,
const std::vector<ptrdiff_t>& shape,
intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
size_t ndim = shape.size();
ptrdiff_t size;
hipStream_t stream_ = (hipStream_t)stream;
cupy_allocator alloc(memory);
thrust::device_ptr<size_t> dp_idx_first, dp_idx_last;
thrust::device_ptr<T> dp_data_first, dp_data_last;
thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
// Compute the total size of the data array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
// Cast device pointers of data.
dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);
// Generate an index sequence.
dp_idx_first = thrust::device_pointer_cast(static_cast<size_t*>(idx_start));
dp_idx_last = thrust::device_pointer_cast(static_cast<size_t*>(idx_start) + size);
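        // idx[i] = i % row_length: each element's offset within its row
        // (for ndim == 1 this reduces to the identity sequence 0..size-1)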
transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
rocprim::make_counting_iterator<size_t>(0),
rocprim::make_counting_iterator<size_t>(size),
rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
dp_idx_first,
thrust::modulus<size_t>());
if (ndim == 1) {
// Sort the index sequence by data.
stable_sort_by_key(cuda::par(alloc).on(stream_),
dp_data_first,
dp_data_last,
dp_idx_first);
} else {
// Generate key indices.
dp_keys_first = thrust::device_pointer_cast(static_cast<size_t*>(keys_start));
dp_keys_last = thrust::device_pointer_cast(static_cast<size_t*>(keys_start) + size);
transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
rocprim::make_counting_iterator<size_t>(0),
rocprim::make_counting_iterator<size_t>(size),
rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
dp_keys_first,
thrust::divides<size_t>());
stable_sort_by_key(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
dp_idx_first);
}
}
};
//
// APIs exposed to CuPy
//
/* -------- sort -------- */
void thrust_sort(int dtype_id, void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream, void* memory) {
_sort op;
return dtype_dispatcher(dtype_id, op, data_start, keys_start, shape, stream, memory);
}
/* -------- lexsort -------- */
void thrust_lexsort(int dtype_id, size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
_lexsort op;
return dtype_dispatcher(dtype_id, op, idx_start, keys_start, k, n, stream, memory);
}
/* -------- argsort -------- */
void thrust_argsort(int dtype_id, size_t *idx_start, void *data_start,
void *keys_start, const std::vector<ptrdiff_t>& shape, intptr_t stream, void *memory) {
_argsort op;
return dtype_dispatcher(dtype_id, op, idx_start, data_start, keys_start, shape,
stream, memory);
}
| f4d34887ecea7dcb2a6b50193d1aa87699e56009.cu | #include <cupy/type_dispatcher.cuh>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/tuple.h>
#include <thrust/execution_policy.h>
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
// This is used to avoid a problem with constexpr in functions declarations introduced in
// cuda 11.2, MSVC 15 does not fully support it so we need a dummy constexpr declaration
// that is provided by this header. However optional.h is only available
// starting CUDA 10.1
#include <thrust/optional.h>
#ifdef _MSC_VER
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS constexpr
#else
#define THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
#endif
#include "cupy_thrust.h"
#if CUPY_USE_HIP
typedef hipStream_t cudaStream_t;
namespace cuda {
using thrust::hip::par;
}
#else // #if CUPY_USE_HIP
namespace cuda {
using thrust::cuda::par;
}
#endif // #if CUPY_USE_HIP
extern "C" char *cupy_malloc(void *, size_t);
extern "C" void cupy_free(void *, char *);
class cupy_allocator {
private:
void* memory;
public:
typedef char value_type;
cupy_allocator(void* memory) : memory(memory) {}
char *allocate(size_t num_bytes) {
return cupy_malloc(memory, num_bytes);
}
void deallocate(char *ptr, size_t n) {
cupy_free(memory, ptr);
}
};
/*
* ------------------------------------- Minimum boilerplate for NumPy compatibility --------------------------------------
* We need a specialized operator< here in order to match the NumPy behavior:
* "The sort order for complex numbers is lexicographic. If both the real and imaginary parts are non-nan then the order is
* determined by the real parts except when they are equal, in which case the order is determined by the imaginary parts.
*
* In numpy versions >= 1.4.0 nan values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
* where R is a non-nan real value. Complex values with the same nan placements are sorted according to the non-nan part if
* it exists. Non-nan values are sorted as before."
* Ref: https://numpy.org/doc/stable/reference/generated/numpy.sort.html
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _tuple_less(const thrust::tuple<size_t, T>& lhs,
const thrust::tuple<size_t, T>& rhs) {
const size_t& lhs_k = lhs.template get<0>();
const size_t& rhs_k = rhs.template get<0>();
const T& lhs_v = lhs.template get<1>();
const T& rhs_v = rhs.template get<1>();
const thrust::less<T> _less;
// tuple's comparison rule: compare the 1st member, then 2nd, then 3rd, ...,
// which should be respected
if (lhs_k < rhs_k) {
return true;
} else if (lhs_k == rhs_k) {
// same key, compare values
// note that we can't rely on native operator< due to NaN, so we rely on
// thrust::less() to be specialized shortly
return _less(lhs_v, rhs_v);
} else {
return false;
}
}
/*
* ********** complex numbers **********
* We need to specialize thrust::less because obviously we can't overload operator< for complex numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _cmp_less(const T& lhs, const T& rhs) {
bool lhsRe = isnan(lhs.real());
bool lhsIm = isnan(lhs.imag());
bool rhsRe = isnan(rhs.real());
bool rhsIm = isnan(rhs.imag());
// neither side has nan
if (!lhsRe && !lhsIm && !rhsRe && !rhsIm) {
return lhs < rhs;
}
// one side has nan, and the other does not
if (!lhsRe && !lhsIm && (rhsRe || rhsIm)) {
return true;
}
if ((lhsRe || lhsIm) && !rhsRe && !rhsIm) {
return false;
}
// pick 2 from 3 possibilities (R + nanj, nan + Rj, nan + nanj)
if (lhsRe && !rhsRe) {
return false;
}
if (!lhsRe && rhsRe) {
return true;
}
if (lhsIm && !rhsIm) {
return false;
}
if (!lhsIm && rhsIm) {
return true;
}
// pick 1 from 3 and compare the numerical values (nan+nan*I compares to itself as false)
return (((lhsIm && rhsIm) && (lhs.real() < rhs.real())) || ((lhsRe && rhsRe) && (lhs.imag() < rhs.imag())));
}
// specialize thrust::less for single complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<complex<float>>::operator() (
const complex<float>& lhs, const complex<float>& rhs) const {
return _cmp_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for double complex
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<complex<double>>::operator() (
const complex<double>& lhs, const complex<double>& rhs) const {
return _cmp_less<complex<double>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<float>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, complex<float>> >::operator() (
const thrust::tuple<size_t, complex<float>>& lhs, const thrust::tuple<size_t, complex<float>>& rhs) const {
return _tuple_less<complex<float>>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, complex<double>>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, complex<double>> >::operator() (
const thrust::tuple<size_t, complex<double>>& lhs, const thrust::tuple<size_t, complex<double>>& rhs) const {
return _tuple_less<complex<double>>(lhs, rhs);
}
/*
* ********** real numbers (templates) **********
* We need to specialize thrust::less because obviously we can't overload operator< for floating point numbers...
*/
template <typename T>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2))
THRUST_OPTIONAL_CPP11_CONSTEXPR
#endif
bool _real_less(const T& lhs, const T& rhs) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
if (isnan(lhs)) {
return false;
} else if (isnan(rhs)) {
return true;
} else {
return lhs < rhs;
}
#else
    return false;  // This will never be executed on the host
#endif
}
/*
* ********** real numbers (specializations for single & double precisions) **********
*/
// specialize thrust::less for float
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<float>::operator() (
const float& lhs, const float& rhs) const {
return _real_less<float>(lhs, rhs);
}
// specialize thrust::less for double
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<double>::operator() (
const double& lhs, const double& rhs) const {
return _real_less<double>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, float>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, float> >::operator() (
const thrust::tuple<size_t, float>& lhs, const thrust::tuple<size_t, float>& rhs) const {
return _tuple_less<float>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, double>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, double> >::operator() (
const thrust::tuple<size_t, double>& lhs, const thrust::tuple<size_t, double>& rhs) const {
return _tuple_less<double>(lhs, rhs);
}
/*
* ********** real numbers (specializations for half precision) **********
*/
#if ((__CUDACC_VER_MAJOR__ > 9 || (__CUDACC_VER_MAJOR__ == 9 && __CUDACC_VER_MINOR__ == 2)) \
&& (__CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__))) || (defined(__HIPCC__) || defined(CUPY_USE_HIP))
// it seems Thrust doesn't care about the code path on the host, so we just need a wrapper for the device
__host__ __device__ __forceinline__ bool isnan(const __half& x) {
#if (defined(__CUDA_ARCH__) || defined(__HIP_DEVICE_COMPILE__))
return __hisnan(x);
#else
return false; // This will never be called on the host
#endif
}
// specialize thrust::less for __half
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less<__half>::operator() (const __half& lhs, const __half& rhs) const {
return _real_less<__half>(lhs, rhs);
}
// specialize thrust::less for tuple<size_t, __half>
template <>
__host__ __device__ __forceinline__
#if (__CUDACC_VER_MAJOR__ >11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 2) || HIP_VERSION >= 402)
THRUST_OPTIONAL_CPP11_CONSTEXPR_LESS
#endif
bool thrust::less< thrust::tuple<size_t, __half> >::operator() (
const thrust::tuple<size_t, __half>& lhs, const thrust::tuple<size_t, __half>& rhs) const {
return _tuple_less<__half>(lhs, rhs);
}
#endif // include cupy_fp16.h
/*
* -------------------------------------------------- end of boilerplate --------------------------------------------------
*/
/*
* sort
*/
struct _sort {
template <typename T>
__forceinline__ void operator()(void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream,
void* memory) {
size_t ndim = shape.size();
ptrdiff_t size;
thrust::device_ptr<T> dp_data_first, dp_data_last;
thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
// Compute the total size of the array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);
if (ndim == 1) {
stable_sort(cuda::par(alloc).on(stream_), dp_data_first, dp_data_last, thrust::less<T>());
} else {
// Generate key indices.
dp_keys_first = thrust::device_pointer_cast(keys_start);
dp_keys_last = thrust::device_pointer_cast(keys_start + size);
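            // key[i] = i / row_length: elements of the same row share a key,
            // so sorting the (key, value) zip below sorts each row independently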
transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
rocprim::make_counting_iterator<size_t>(0),
rocprim::make_counting_iterator<size_t>(size),
rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
dp_keys_first,
thrust::divides<size_t>());
stable_sort(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
thrust::less< thrust::tuple<size_t, T> >());
}
}
};
/*
* lexsort
*/
template <typename T>
class elem_less {
public:
elem_less(const T *data):_data(data) {}
__device__ __forceinline__ bool operator()(size_t i, size_t j) const {
return thrust::less<T>()(_data[i], _data[j]);
}
private:
const T *_data;
};
struct _lexsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
thrust::device_ptr<size_t> dp_first = thrust::device_pointer_cast(idx_start);
thrust::device_ptr<size_t> dp_last = thrust::device_pointer_cast(idx_start + n);
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
sequence(cuda::par(alloc).on(stream_), dp_first, dp_last);
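        // keys are applied in order 0..k-1 with a stable sort, so the last
        // key processed (i == k-1) becomes the primary sort key, matching
        // NumPy's lexsort convention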
for (size_t i = 0; i < k; ++i) {
T *key_start = static_cast<T*>(keys_start) + i * n;
stable_sort(
cuda::par(alloc).on(stream_),
dp_first,
dp_last,
elem_less<T>(key_start)
);
}
}
};
/*
* argsort
*/
struct _argsort {
template <typename T>
__forceinline__ void operator()(size_t *idx_start, void *data_start,
void *keys_start,
const std::vector<ptrdiff_t>& shape,
intptr_t stream, void *memory) {
/* idx_start is the beginning of the output array where the indexes that
would sort the data will be placed. The original contents of idx_start
will be destroyed. */
size_t ndim = shape.size();
ptrdiff_t size;
cudaStream_t stream_ = (cudaStream_t)stream;
cupy_allocator alloc(memory);
thrust::device_ptr<size_t> dp_idx_first, dp_idx_last;
thrust::device_ptr<T> dp_data_first, dp_data_last;
thrust::device_ptr<size_t> dp_keys_first, dp_keys_last;
// Compute the total size of the data array.
size = shape[0];
for (size_t i = 1; i < ndim; ++i) {
size *= shape[i];
}
// Cast device pointers of data.
dp_data_first = thrust::device_pointer_cast(static_cast<T*>(data_start));
dp_data_last = thrust::device_pointer_cast(static_cast<T*>(data_start) + size);
// Generate an index sequence.
dp_idx_first = thrust::device_pointer_cast(static_cast<size_t*>(idx_start));
dp_idx_last = thrust::device_pointer_cast(static_cast<size_t*>(idx_start) + size);
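        // idx[i] = i % row_length: each element's offset within its row
        // (for ndim == 1 this reduces to the identity sequence 0..size-1)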
transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
rocprim::make_counting_iterator<size_t>(0),
rocprim::make_counting_iterator<size_t>(size),
rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
dp_idx_first,
thrust::modulus<size_t>());
if (ndim == 1) {
// Sort the index sequence by data.
stable_sort_by_key(cuda::par(alloc).on(stream_),
dp_data_first,
dp_data_last,
dp_idx_first);
} else {
// Generate key indices.
dp_keys_first = thrust::device_pointer_cast(static_cast<size_t*>(keys_start));
dp_keys_last = thrust::device_pointer_cast(static_cast<size_t*>(keys_start) + size);
transform(cuda::par(alloc).on(stream_),
#ifdef __HIP_PLATFORM_HCC__
rocprim::make_counting_iterator<size_t>(0),
rocprim::make_counting_iterator<size_t>(size),
rocprim::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#else
thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(size),
thrust::make_constant_iterator<ptrdiff_t>(shape[ndim-1]),
#endif
dp_keys_first,
thrust::divides<size_t>());
stable_sort_by_key(
cuda::par(alloc).on(stream_),
make_zip_iterator(make_tuple(dp_keys_first, dp_data_first)),
make_zip_iterator(make_tuple(dp_keys_last, dp_data_last)),
dp_idx_first);
}
}
};
//
// APIs exposed to CuPy
//
/* -------- sort -------- */
void thrust_sort(int dtype_id, void *data_start, size_t *keys_start,
const std::vector<ptrdiff_t>& shape, intptr_t stream, void* memory) {
_sort op;
return dtype_dispatcher(dtype_id, op, data_start, keys_start, shape, stream, memory);
}
/* -------- lexsort -------- */
void thrust_lexsort(int dtype_id, size_t *idx_start, void *keys_start, size_t k,
size_t n, intptr_t stream, void *memory) {
_lexsort op;
return dtype_dispatcher(dtype_id, op, idx_start, keys_start, k, n, stream, memory);
}
/* -------- argsort -------- */
void thrust_argsort(int dtype_id, size_t *idx_start, void *data_start,
void *keys_start, const std::vector<ptrdiff_t>& shape, intptr_t stream, void *memory) {
_argsort op;
return dtype_dispatcher(dtype_id, op, idx_start, data_start, keys_start, shape,
stream, memory);
}
|
f1a7091a4e7ccfae2bdc39070357bbba23841f27.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void computeScoreGradientList(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, int voxel_num, double *e_x_cov_x, double *cov_dxd_pi, double gauss_d1, int valid_voxel_num, double *score_gradients)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int col = blockIdx.y;
if (col < 6) {
double *sg = score_gradients + col * valid_points_num;
double *cov_dxd_pi_mat0 = cov_dxd_pi + col * valid_voxel_num;
double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num;
double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double tmp_sg = 0.0;
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
double tmp_ex = e_x_cov_x[j];
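// accept only weights within [0, 1]; the tmp_ex != tmp_ex test filters out NaN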
if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
tmp_ex *= gauss_d1;
tmp_sg += ((d_x - centroid_x[vid]) * cov_dxd_pi_mat0[j] + (d_y - centroid_y[vid]) * cov_dxd_pi_mat1[j] + (d_z - centroid_z[vid]) * cov_dxd_pi_mat2[j]) * tmp_ex;
}
}
sg[i] = tmp_sg;
}
}
} | f1a7091a4e7ccfae2bdc39070357bbba23841f27.cu | #include "includes.h"
__global__ void computeScoreGradientList(float *trans_x, float *trans_y, float *trans_z, int *valid_points, int *starting_voxel_id, int *voxel_id, int valid_points_num, double *centroid_x, double *centroid_y, double *centroid_z, int voxel_num, double *e_x_cov_x, double *cov_dxd_pi, double gauss_d1, int valid_voxel_num, double *score_gradients)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
int col = blockIdx.y;
if (col < 6) {
double *sg = score_gradients + col * valid_points_num;
double *cov_dxd_pi_mat0 = cov_dxd_pi + col * valid_voxel_num;
double *cov_dxd_pi_mat1 = cov_dxd_pi_mat0 + 6 * valid_voxel_num;
double *cov_dxd_pi_mat2 = cov_dxd_pi_mat1 + 6 * valid_voxel_num;
for (int i = id; i < valid_points_num; i += stride) {
int pid = valid_points[i];
double d_x = static_cast<double>(trans_x[pid]);
double d_y = static_cast<double>(trans_y[pid]);
double d_z = static_cast<double>(trans_z[pid]);
double tmp_sg = 0.0;
for ( int j = starting_voxel_id[i]; j < starting_voxel_id[i + 1]; j++) {
int vid = voxel_id[j];
double tmp_ex = e_x_cov_x[j];
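// accept only weights within [0, 1]; the tmp_ex != tmp_ex test filters out NaN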
if (!(tmp_ex > 1 || tmp_ex < 0 || tmp_ex != tmp_ex)) {
tmp_ex *= gauss_d1;
tmp_sg += ((d_x - centroid_x[vid]) * cov_dxd_pi_mat0[j] + (d_y - centroid_y[vid]) * cov_dxd_pi_mat1[j] + (d_z - centroid_z[vid]) * cov_dxd_pi_mat2[j]) * tmp_ex;
}
}
sg[i] = tmp_sg;
}
}
} |
592fc03ff27e2538ef0daa232f9789c814487d18.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#define EIGEN_USE_GPU
#include "ContinuousConvTransposeBackpropFilterOpKernel.h"
#include "open3d/core/CUDAUtils.h"
#include "open3d/ml/impl/continuous_conv/ContinuousConvTransposeBackpropFilter.cuh"
using namespace open3d;
using namespace open3d::ml;
using namespace open3d::ml::impl;
using namespace tensorflow;
template <class TFeat, class TOut, class TReal, class TIndex>
class ContinuousConvTransposeBackpropFilterOpKernelCUDA
: public ContinuousConvTransposeBackpropFilterOpKernel<TIndex> {
public:
explicit ContinuousConvTransposeBackpropFilterOpKernelCUDA(
OpKernelConstruction* construction)
: ContinuousConvTransposeBackpropFilterOpKernel<TIndex>(construction) {
texture_alignment =
open3d::core::GetCUDACurrentDeviceTextureAlignment();
}
void Kernel(tensorflow::OpKernelContext* context,
const tensorflow::Tensor& filter,
const tensorflow::Tensor& out_positions,
const tensorflow::Tensor& out_importance,
const tensorflow::Tensor& extents,
const tensorflow::Tensor& offset,
const tensorflow::Tensor& inp_positions,
const tensorflow::Tensor& inp_features,
const tensorflow::Tensor& inp_neighbors_importance_sum,
const tensorflow::Tensor& inp_neighbors_row_splits,
const tensorflow::Tensor& neighbors_index,
const tensorflow::Tensor& neighbors_importance,
const tensorflow::Tensor& neighbors_row_splits,
const tensorflow::Tensor& out_features_gradient,
const std::vector<int>& filter_dims,
const bool individual_extents,
const bool isotropic_extents,
const bool point_importances,
const bool has_neighbors_importances,
tensorflow::Tensor& filter_backprop) {
auto device = context->eigen_gpu_device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
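        // two-phase pattern: the first call passes temp_ptr == nullptr and only
        // queries the required and maximum scratch sizes; the second call below
        // performs the actual computation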
// determine temp_size
CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TOut>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TFeat>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TFeat>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TFeat>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TFeat>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
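        // use up to max_temp_mem_MB of scratch (capped at what the kernel can
        // exploit), but never less than the required minimum temp_size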
temp_size =
::max(::min(size_t(this->max_temp_mem_MB) * 1024 * 1024,
max_temp_size),
temp_size);
Tensor temp_tensor;
TensorShape temp_shape({ssize_t(temp_size)});
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<uint8_t>::v(),
temp_shape, &temp_tensor));
temp_ptr = temp_tensor.flat<uint8_t>().data();
// actually run the operation
CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TOut>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TFeat>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TFeat>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TFeat>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TFeat>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
}
private:
int texture_alignment;
};
#define REG_KB(feattype, outtype, realtype, indextype) \
REGISTER_KERNEL_BUILDER( \
Name("Open3DContinuousConvTransposeBackpropFilter") \
.Device(DEVICE_GPU) \
.TypeConstraint<feattype>("TFeat") \
.TypeConstraint<outtype>("output_type") \
.TypeConstraint<realtype>("TReal") \
.TypeConstraint<indextype>("TIndex"), \
ContinuousConvTransposeBackpropFilterOpKernelCUDA< \
feattype, outtype, realtype, indextype>);
REG_KB(float, float, float, int32)
#undef REG_KB
| 592fc03ff27e2538ef0daa232f9789c814487d18.cu | // ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// The MIT License (MIT)
//
// Copyright (c) 2018-2021 www.open3d.org
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
// ----------------------------------------------------------------------------
#define EIGEN_USE_GPU
#include "ContinuousConvTransposeBackpropFilterOpKernel.h"
#include "open3d/core/CUDAUtils.h"
#include "open3d/ml/impl/continuous_conv/ContinuousConvTransposeBackpropFilter.cuh"
using namespace open3d;
using namespace open3d::ml;
using namespace open3d::ml::impl;
using namespace tensorflow;
template <class TFeat, class TOut, class TReal, class TIndex>
class ContinuousConvTransposeBackpropFilterOpKernelCUDA
: public ContinuousConvTransposeBackpropFilterOpKernel<TIndex> {
public:
explicit ContinuousConvTransposeBackpropFilterOpKernelCUDA(
OpKernelConstruction* construction)
: ContinuousConvTransposeBackpropFilterOpKernel<TIndex>(construction) {
texture_alignment =
open3d::core::GetCUDACurrentDeviceTextureAlignment();
}
void Kernel(tensorflow::OpKernelContext* context,
const tensorflow::Tensor& filter,
const tensorflow::Tensor& out_positions,
const tensorflow::Tensor& out_importance,
const tensorflow::Tensor& extents,
const tensorflow::Tensor& offset,
const tensorflow::Tensor& inp_positions,
const tensorflow::Tensor& inp_features,
const tensorflow::Tensor& inp_neighbors_importance_sum,
const tensorflow::Tensor& inp_neighbors_row_splits,
const tensorflow::Tensor& neighbors_index,
const tensorflow::Tensor& neighbors_importance,
const tensorflow::Tensor& neighbors_row_splits,
const tensorflow::Tensor& out_features_gradient,
const std::vector<int>& filter_dims,
const bool individual_extents,
const bool isotropic_extents,
const bool point_importances,
const bool has_neighbors_importances,
tensorflow::Tensor& filter_backprop) {
auto device = context->eigen_gpu_device();
void* temp_ptr = nullptr;
size_t temp_size = 0;
size_t max_temp_size = 0;
// determine temp_size
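        // The call below is a dry run: with temp_ptr == nullptr it only
        // reports the scratch space it needs via temp_size / max_temp_size.
        // The actual computation happens in the second call, after the temp
        // buffer has been allocated.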
CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TOut>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TFeat>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TFeat>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TFeat>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TFeat>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
temp_size =
std::max(std::min(size_t(this->max_temp_mem_MB) * 1024 * 1024,
max_temp_size),
temp_size);
Tensor temp_tensor;
TensorShape temp_shape({ssize_t(temp_size)});
OP_REQUIRES_OK(context,
context->allocate_temp(DataTypeToEnum<uint8_t>::v(),
temp_shape, &temp_tensor));
temp_ptr = temp_tensor.flat<uint8_t>().data();
// actually run the operation
CConvTransposeBackpropFilterCUDA<TFeat, TOut, TReal, TIndex>(
device.stream(), temp_ptr, temp_size, max_temp_size,
texture_alignment, filter_backprop.flat<TOut>().data(),
filter_dims, out_positions.shape().dim_size(0),
out_positions.flat<TReal>().data(),
point_importances ? out_importance.flat<TFeat>().data()
: nullptr,
inp_positions.shape().dim_size(0),
inp_positions.flat<TReal>().data(),
inp_features.flat<TFeat>().data(),
has_neighbors_importances
? inp_neighbors_importance_sum.flat<TFeat>().data()
: nullptr,
(int64_t*)inp_neighbors_row_splits.flat<int64>().data(),
neighbors_index.shape().dim_size(0),
(TIndex*)neighbors_index.flat<TIndex>().data(),
has_neighbors_importances
? neighbors_importance.flat<TFeat>().data()
: nullptr,
(int64_t*)neighbors_row_splits.flat<int64>().data(),
extents.flat<TReal>().data(), offset.flat<TReal>().data(),
out_features_gradient.flat<TReal>().data(), this->interpolation,
this->coordinate_mapping, this->align_corners,
individual_extents, isotropic_extents, this->normalize);
}
private:
int texture_alignment;
};
#define REG_KB(feattype, outtype, realtype, indextype) \
REGISTER_KERNEL_BUILDER( \
Name("Open3DContinuousConvTransposeBackpropFilter") \
.Device(DEVICE_GPU) \
.TypeConstraint<feattype>("TFeat") \
.TypeConstraint<outtype>("output_type") \
.TypeConstraint<realtype>("TReal") \
.TypeConstraint<indextype>("TIndex"), \
ContinuousConvTransposeBackpropFilterOpKernelCUDA< \
feattype, outtype, realtype, indextype>);
REG_KB(float, float, float, int32)
#undef REG_KB
|
26a73673ab5c1a8af819fdedd3caaf892133dcac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <random>
#include <iostream>
const size_t vectorSize = 64;
const size_t subvectorSize = 1;
const int maxElement = 10;
__global__ void isSubvectorStartingOnIndex(int* vector, int vectorLength, int* subvector, int subvectorLength, bool* output) {
const size_t index = threadIdx.x;
bool result = true;
	for (size_t i = index; i < index + subvectorLength; i++) {
		// subvector[i - index] is the subvector element aligned with vector[i]
		if (i >= vectorLength || vector[i] != subvector[i - index]) {
			result = false;
			break;
		}
	}
output[index] = result;
}
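// One thread per candidate start position: output[t] records whether the
// subvector occurs in the vector beginning at index t (a brute-force scan,
// O(vectorLength * subvectorLength) comparisons in total).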
int main() {
thrust::host_vector<int> hostVector(vectorSize);
for (auto& element : hostVector) {
element = std::rand() % maxElement;
std::cout << element;
}
std::cout << "\n";
thrust::host_vector<int> hostSubvector(subvectorSize);
for (auto& element : hostSubvector) {
// element = std::rand() % maxElement;
element = 2;
std::cout << element;
}
std::cout << "\n";
thrust::device_vector<int> deviceVector = hostVector;
thrust::device_vector<int> deviceSubvector = hostSubvector;
thrust::device_vector<bool> deviceResult(vectorSize);
dim3 dimBlock(vectorSize);
dim3 dimGrid(1);
hipLaunchKernelGGL(( isSubvectorStartingOnIndex), dim3(dimGrid), dim3(dimBlock), 0, 0,
deviceVector.data().get(),
vectorSize,
deviceSubvector.data().get(),
subvectorSize,
deviceResult.data().get()
);
for (const auto& element : deviceResult) {
std::cout << element << ", ";
}
std::cout << "\n";
/*
thrust::counting_iterator<int> deviceFirstIndex = thrust::make_counting_iterator(0);
thrust::counting_iterator<int> deviceLastIndex = thrust::make_counting_iterator((int)vectorSize);
thrust::device_vector<int> deviceInputNumbers = hostVector;
thrust::device_vector<int> deviceOutput(vectorSize);
thrust::copy_if(
deviceFirstIndex,
deviceLastIndex,
deviceInputNumbers.begin(),
deviceOutput.begin(),
isGreaterThanTen()
);
thrust::host_vector<int> hostOutput = deviceOutput;
for (const auto& element : hostOutput) {
std::cout << element << ", ";
}
std::cout << "\n";
*/
return 0;
} | 26a73673ab5c1a8af819fdedd3caaf892133dcac.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include <random>
#include <iostream>
const size_t vectorSize = 64;
const size_t subvectorSize = 1;
const int maxElement = 10;
__global__ void isSubvectorStartingOnIndex(int* vector, int vectorLength, int* subvector, int subvectorLength, bool* output) {
const size_t index = threadIdx.x;
bool result = true;
	for (size_t i = index; i < index + subvectorLength; i++) {
		// subvector[i - index] is the subvector element aligned with vector[i]
		if (i >= vectorLength || vector[i] != subvector[i - index]) {
			result = false;
			break;
		}
	}
output[index] = result;
}
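// One thread per candidate start position: output[t] records whether the
// subvector occurs in the vector beginning at index t (a brute-force scan,
// O(vectorLength * subvectorLength) comparisons in total).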
int main() {
thrust::host_vector<int> hostVector(vectorSize);
for (auto& element : hostVector) {
element = std::rand() % maxElement;
std::cout << element;
}
std::cout << "\n";
thrust::host_vector<int> hostSubvector(subvectorSize);
for (auto& element : hostSubvector) {
// element = std::rand() % maxElement;
element = 2;
std::cout << element;
}
std::cout << "\n";
thrust::device_vector<int> deviceVector = hostVector;
thrust::device_vector<int> deviceSubvector = hostSubvector;
thrust::device_vector<bool> deviceResult(vectorSize);
dim3 dimBlock(vectorSize);
dim3 dimGrid(1);
isSubvectorStartingOnIndex<<<dimGrid, dimBlock>>>(
deviceVector.data().get(),
vectorSize,
deviceSubvector.data().get(),
subvectorSize,
deviceResult.data().get()
);
for (const auto& element : deviceResult) {
std::cout << element << ", ";
}
std::cout << "\n";
/*
thrust::counting_iterator<int> deviceFirstIndex = thrust::make_counting_iterator(0);
thrust::counting_iterator<int> deviceLastIndex = thrust::make_counting_iterator((int)vectorSize);
thrust::device_vector<int> deviceInputNumbers = hostVector;
thrust::device_vector<int> deviceOutput(vectorSize);
thrust::copy_if(
deviceFirstIndex,
deviceLastIndex,
deviceInputNumbers.begin(),
deviceOutput.begin(),
isGreaterThanTen()
);
thrust::host_vector<int> hostOutput = deviceOutput;
for (const auto& element : hostOutput) {
std::cout << element << ", ";
}
std::cout << "\n";
*/
return 0;
} |
4fb516cd3726bd5646247e7b481c4adde974cc88.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_z
__global__
void magma_zlarfg_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
double xnorm;
magmaDoubleComplex dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaDoubleComplex alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_Z_REAL(alpha);
double alphai = MAGMA_Z_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_Z_MAKE(beta, 0.);
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
} else {
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
    with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
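// Concretely, in the real case the kernel computes
//   beta = -sign(alpha) * sqrt(alpha^2 + ||dx||^2),
//   tau  = (beta - alpha) / beta,
// and scales the trailing entries of v as dx <- dx / (alpha - beta);
// the complex case uses |alpha|^2 = alphar^2 + alphai^2 and
//   tau = ( (beta - alphar)/beta , -alphai/beta ).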
extern "C" void
magma_zlarfg_gpu(
magma_int_t n,
magmaDoubleComplex_ptr dx0,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDoubleComplex_ptr dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_dznrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dznrm2_cols(n-1, 1, dx0+1, n, dxnorm);
hipLaunchKernelGGL(( magma_zlarfg_gpu_kernel), dim3(blocks), dim3(threads),
0, magma_stream , n, dx0, dx, dtau, dxnorm, dAkk);
}
| 4fb516cd3726bd5646247e7b481c4adde974cc88.cu | /*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_z
__global__
void magma_zlarfg_gpu_kernel( int n, magmaDoubleComplex* dx0, magmaDoubleComplex* dx,
magmaDoubleComplex *dtau, double *dxnorm, magmaDoubleComplex* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ magmaDoubleComplex scale;
double xnorm;
magmaDoubleComplex dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
magmaDoubleComplex alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
double beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
double alphar = MAGMA_Z_REAL(alpha);
double alphai = MAGMA_Z_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
double beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_Z_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_Z_MAKE(beta, 0.);
alpha = MAGMA_Z_MAKE( MAGMA_Z_REAL(alpha) - beta, MAGMA_Z_IMAG(alpha));
scale = MAGMA_Z_DIV( MAGMA_Z_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_Z_MUL(dxi, scale);
} else {
*dtau = MAGMA_Z_ZERO;
*dAkk = *dx0;
}
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's zlarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
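// Concretely, in the real case the kernel computes
//   beta = -sign(alpha) * sqrt(alpha^2 + ||dx||^2),
//   tau  = (beta - alpha) / beta,
// and scales the trailing entries of v as dx <- dx / (alpha - beta);
// the complex case uses |alpha|^2 = alphar^2 + alphai^2 and
//   tau = ( (beta - alphar)/beta , -alphai/beta ).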
extern "C" void
magma_zlarfg_gpu(
magma_int_t n,
magmaDoubleComplex_ptr dx0,
magmaDoubleComplex_ptr dx,
magmaDoubleComplex_ptr dtau,
magmaDouble_ptr dxnorm,
magmaDoubleComplex_ptr dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_dznrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_dznrm2_cols(n-1, 1, dx0+1, n, dxnorm);
magma_zlarfg_gpu_kernel<<< blocks, threads,
0, magma_stream >>>(n, dx0, dx, dtau, dxnorm, dAkk);
}
|
fb6ac734ce4a3d3dd44f8eee5254f209ad668cfa.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define RADIUS 3
int checkResults(int startElem, int endElem, float* cudaRes, float* res)
{
int nDiffs=0;
const float smallVal = 0.0001f;
for(int i=startElem; i<endElem; i++)
if(fabs(cudaRes[i]-res[i])>smallVal)
nDiffs++;
return nDiffs;
}
void initializeWeights(float* weights, int rad)
{
// for now hardcoded for RADIUS=3
weights[0] = 0.50f;
weights[1] = 0.75f;
weights[2] = 1.25f;
weights[3] = 2.00f;
weights[4] = 1.25f;
weights[5] = 0.75f;
weights[6] = 0.50f;
}
void initializeArray(FILE* fp,float* arr, int nElements)
{
for( int i=0; i<nElements; i++){
fscanf(fp,"%f",&arr[i]);
if(getc(fp) == EOF) rewind(fp);
}
}
void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in, float *out) {
for (int i = sIdx; i < eIdx; i++) {
out[i] = 0;
//loop over all elements in the stencil
for (int j = -RADIUS; j <= RADIUS; j++) {
out[i] += weights[j + RADIUS] * in[i + j];
}
out[i] = out[i] / (2 * RADIUS + 1);
}
}
__global__ void applyStencil1D(int sIdx, int eIdx, const float *weights, float *in, float *out) {
int i = sIdx + blockIdx.x*blockDim.x + threadIdx.x;
if( i < eIdx ) {
float result = 0.f;
result += weights[0]*in[i-3];
result += weights[1]*in[i-2];
result += weights[2]*in[i-1];
result += weights[3]*in[i];
result += weights[4]*in[i+1];
result += weights[5]*in[i+2];
result += weights[6]*in[i+3];
result /=7.f;
out[i] = result;
}
}
int main(int argc, char* argv[]) {
if(argc!=2){
printf("Usage %s N\n",argv[0]);
return 1;
}
int N=atoi(argv[1]);
FILE *fp = fopen("problem1.inp","r");
int size = N * sizeof(float);
int wsize = (2 * RADIUS + 1) * sizeof(float);
//allocate resources
float *weights, *in, *cuda_out;
hipHostMalloc((void **)&weights, wsize);
hipHostMalloc((void **)&in, size);
hipHostMalloc((void **)&cuda_out, size);
float *out = (float *)malloc(size);
float time = 0.f;
initializeWeights(weights, RADIUS);
initializeArray(fp,in, N);
float *d_weights; hipMalloc(&d_weights, wsize);
float *d_in; hipMalloc(&d_in, size);
float *d_out; hipMalloc(&d_out, size);
hipMemcpy(d_weights,weights,wsize,hipMemcpyHostToDevice);
hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( applyStencil1D), dim3((N+511)/512), dim3(512), 0, 0, RADIUS, N-RADIUS, d_weights, d_in, d_out);
hipMemcpy(cuda_out, d_out, size, hipMemcpyDeviceToHost);
applyStencil1D_SEQ(RADIUS, N-RADIUS, weights, in, out);
int nDiffs = checkResults(RADIUS, N-RADIUS, cuda_out, out);
if(nDiffs)printf("Test Failed\n"); // This should never print
printf("%f\n%f\n",cuda_out[N-RADIUS-1],time);
//free resources
hipHostFree(weights); hipHostFree(in); hipHostFree(cuda_out);
free(out);
hipFree(d_weights); hipFree(d_in); hipFree(d_out);
return 0;
}
| fb6ac734ce4a3d3dd44f8eee5254f209ad668cfa.cu | #include<iostream>
#include<stdlib.h>
#include <cuda.h>
#include <math.h>
#define RADIUS 3
int checkResults(int startElem, int endElem, float* cudaRes, float* res)
{
int nDiffs=0;
const float smallVal = 0.0001f;
for(int i=startElem; i<endElem; i++)
if(fabs(cudaRes[i]-res[i])>smallVal)
nDiffs++;
return nDiffs;
}
void initializeWeights(float* weights, int rad)
{
// for now hardcoded for RADIUS=3
weights[0] = 0.50f;
weights[1] = 0.75f;
weights[2] = 1.25f;
weights[3] = 2.00f;
weights[4] = 1.25f;
weights[5] = 0.75f;
weights[6] = 0.50f;
}
void initializeArray(FILE* fp,float* arr, int nElements)
{
for( int i=0; i<nElements; i++){
fscanf(fp,"%f",&arr[i]);
if(getc(fp) == EOF) rewind(fp);
}
}
void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in, float *out) {
for (int i = sIdx; i < eIdx; i++) {
out[i] = 0;
//loop over all elements in the stencil
for (int j = -RADIUS; j <= RADIUS; j++) {
out[i] += weights[j + RADIUS] * in[i + j];
}
out[i] = out[i] / (2 * RADIUS + 1);
}
}
__global__ void applyStencil1D(int sIdx, int eIdx, const float *weights, float *in, float *out) {
int i = sIdx + blockIdx.x*blockDim.x + threadIdx.x;
if( i < eIdx ) {
float result = 0.f;
result += weights[0]*in[i-3];
result += weights[1]*in[i-2];
result += weights[2]*in[i-1];
result += weights[3]*in[i];
result += weights[4]*in[i+1];
result += weights[5]*in[i+2];
result += weights[6]*in[i+3];
result /=7.f;
out[i] = result;
}
}
int main(int argc, char* argv[]) {
if(argc!=2){
printf("Usage %s N\n",argv[0]);
return 1;
}
int N=atoi(argv[1]);
FILE *fp = fopen("problem1.inp","r");
int size = N * sizeof(float);
int wsize = (2 * RADIUS + 1) * sizeof(float);
//allocate resources
float *weights, *in, *cuda_out;
cudaMallocHost((void **)&weights, wsize);
cudaMallocHost((void **)&in, size);
cudaMallocHost((void **)&cuda_out, size);
float *out = (float *)malloc(size);
float time = 0.f;
initializeWeights(weights, RADIUS);
initializeArray(fp,in, N);
float *d_weights; cudaMalloc(&d_weights, wsize);
float *d_in; cudaMalloc(&d_in, size);
float *d_out; cudaMalloc(&d_out, size);
cudaMemcpy(d_weights,weights,wsize,cudaMemcpyHostToDevice);
cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
applyStencil1D<<<(N+511)/512, 512>>>(RADIUS, N-RADIUS, d_weights, d_in, d_out);
cudaMemcpy(cuda_out, d_out, size, cudaMemcpyDeviceToHost);
applyStencil1D_SEQ(RADIUS, N-RADIUS, weights, in, out);
int nDiffs = checkResults(RADIUS, N-RADIUS, cuda_out, out);
if(nDiffs)printf("Test Failed\n"); // This should never print
printf("%f\n%f\n",cuda_out[N-RADIUS-1],time);
//free resources
cudaFreeHost(weights); cudaFreeHost(in); cudaFreeHost(cuda_out);
free(out);
cudaFree(d_weights); cudaFree(d_in); cudaFree(d_out);
return 0;
}
|
1b8e24a579cda800ad1664bdbf6305c3bdb93644.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/opencv.hpp>
#include <hipsolver/hipsolver.h>  // hipified counterpart of <cusolverDn.h>; assumes ROCm's hipSOLVER compatibility (hipsolverDn*) API
#include <cassert>
#include <iomanip>
#include <sstream>
//#define SHOW_DATASET
//#define SHOW_AVERAGE
//#define SHOW_SHIFTED_IMAGES
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
/******************/
/* ERROR CHECKING */
/******************/
#define cudaCHECK(ans) { checkAssert((ans), __FILE__, __LINE__); }
inline void checkAssert(hipError_t errorCode, const char *file, int line, bool abort = true)
{
if (errorCode != hipSuccess)
{
fprintf(stderr, "Check assert: %s %s %d\n", hipGetErrorString(errorCode), file, line);
if (abort) exit(errorCode);
}
}
/***************************/
/* cuSOLVER ERROR CHECKING */
/***************************/
static const char *_cuSolverReturnErrorString(hipsolverStatus_t errorCode)
{
	switch (errorCode) {
	case HIPSOLVER_STATUS_SUCCESS: return "cuSolver successful call";
	case HIPSOLVER_STATUS_NOT_INITIALIZED: return "cuSolver is not initialized";
	case HIPSOLVER_STATUS_ALLOC_FAILED: return "cuSolver internal resource allocation failed";
	case HIPSOLVER_STATUS_INVALID_VALUE: return "cuSolver function has an unsupported value or parameter";
	case HIPSOLVER_STATUS_ARCH_MISMATCH: return "cuSolver function requires an unsupported architecture feature";
	case HIPSOLVER_STATUS_EXECUTION_FAILED: return "cuSolver function failed to execute";
	case HIPSOLVER_STATUS_INTERNAL_ERROR: return "cuSolver internal operation failed";
	// hipSOLVER has no MATRIX_TYPE_NOT_SUPPORTED status; NOT_SUPPORTED is
	// assumed here as the closest equivalent
	case HIPSOLVER_STATUS_NOT_SUPPORTED: return "Matrix type not supported";
	}
	return "<unknown>";
}
inline void __cuSolverCHECK(hipsolverStatus_t errorCode, const char *file, const int line)
{
	if (HIPSOLVER_STATUS_SUCCESS != errorCode) {
		fprintf(stderr, "cuSolver was unsuccessful in file '%s', line %d; the reported error is: %s \nterminating!\n", __FILE__, __LINE__, \
			_cuSolverReturnErrorString(errorCode)); \
			assert(0); \
	}
}
void cuSolverCHECK(hipsolverStatus_t errorCode) { __cuSolverCHECK(errorCode, __FILE__, __LINE__); }
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }
/**********************/
/* REMOVE MEAN KERNEL */
/**********************/
__global__ void removeMeanKernel(const float * __restrict__ srcPtr, float * __restrict__ dstPtr, const size_t srcStep, const size_t dstStep, const int Nrows, const int Ncols) {
int rowIdx = blockIdx.y * blockDim.y + threadIdx.y;
int colIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (rowIdx >= Nrows || colIdx >= Ncols) return;
const float *rowSrcPtr = (const float *)(((char *)srcPtr) + 0 * srcStep);
float *rowDstPtr = (float *)(((char *)dstPtr) + rowIdx * dstStep);
rowDstPtr[colIdx] = rowDstPtr[colIdx] - rowSrcPtr[colIdx];
}
/********/
/* MAIN */
/********/
int main() {
// --- Customized
const int numImages = 24;
const int nRows = 64;
const int nCols = 64;
const int numTrain = 24;
const int cRows = 10;
const int cCols = 10;
const int numEigenfaces = 7;
/***********/
/* STEP #1 */
/***********/
// --- Image path
std::string pathToData("D:\\Project\\Packt\\Eigenfaces\\Customized\\");
// --- GPU memory allocation
cv::cuda::GpuMat d_A(numTrain, nRows * nCols, CV_32FC1);
// --- Loading dataset images
cv::Mat h_imageTemp, h_imageTempCast, h_imageTempResized;
float *rowPointer;
for (int k = 0; k < numImages; k++) {
std::stringstream ss;
ss << std::setw(3) << std::setfill('0') << k;
std::string s = ss.str();
h_imageTemp = cv::imread(pathToData + s + ".png", -1);
cv::transpose(h_imageTemp, h_imageTemp);
h_imageTemp.convertTo(h_imageTempCast, CV_32FC1);
h_imageTempCast = h_imageTempCast.reshape(0, 1);
std::string ty = cv::typeToString(h_imageTemp.type());
printf("Loaded image: %s %dx%d \n", ty.c_str(), h_imageTemp.rows, h_imageTemp.cols);
ty = cv::typeToString(h_imageTempCast.type());
printf("Cast image: %s %dx%d \n", ty.c_str(), h_imageTempCast.rows, h_imageTempCast.cols);
#ifdef SHOW_DATASET
cv::resize(h_imageTemp, h_imageTempResized, cv::Size(nRows * cRows, nCols * cCols), cv::INTER_CUBIC);
cv::imshow("Dataset image", h_imageTempResized);
cv::waitKey(0);
#endif
// --- Copy generic row
rowPointer = d_A.ptr<float>(k);
cudaCHECK(hipMemcpy2D(rowPointer,
d_A.step * sizeof(float),
h_imageTempCast.ptr<float>(0),
h_imageTempCast.step * sizeof(float),
h_imageTempCast.cols * sizeof(float),
1,
hipMemcpyHostToDevice));
}
/***********/
/* STEP #2 */
/***********/
// --- Average
cv::cuda::GpuMat d_mean(1, nRows * nCols, CV_32FC1);
cv::cuda::reduce(d_A, d_mean, 0, 1);
#ifdef SHOW_AVERAGE
cv::Mat h_mean(d_mean);
h_mean = h_mean.reshape(0, nRows);
h_mean.convertTo(h_mean, CV_8UC1);
cv::resize(h_mean, h_mean, cv::Size(cRows * nRows, cCols * nCols), cv::INTER_CUBIC);
std::string ty = cv::typeToString(h_mean.type());
printf("Average image: %s %dx%d \n", ty.c_str(), h_mean.rows, h_mean.cols);
cv::imshow("Average image", h_mean);
cv::waitKey(0);
#endif
/***********/
/* STEP #3 */
/***********/
// --- Shift images
dim3 blockDim(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 gridDim(iDivUp(d_A.cols, BLOCKSIZE_X), iDivUp(d_A.rows, BLOCKSIZE_Y));
removeMeanKernel << <gridDim, blockDim >> > ((float *)d_mean.data, (float *)d_A.data, d_mean.step, d_A.step, numTrain, nRows * nCols);
cudaCHECK(hipPeekAtLastError());
cudaCHECK(hipDeviceSynchronize());
#ifdef SHOW_SHIFTED_IMAGES
	int count = 0;
for (int k = 0; k < numImages; k++) {
cv::Mat h_shiftedImage(1, nRows * nCols, CV_32FC1);
// --- Copy generic row
rowPointer = d_A.ptr<float>(count);
cudaCHECK(hipMemcpy2D(h_shiftedImage.ptr<float>(0),
h_shiftedImage.step * sizeof(float),
rowPointer,
d_A.step * sizeof(float),
d_A.cols * sizeof(float),
1,
hipMemcpyDeviceToHost));
h_shiftedImage = h_shiftedImage.reshape(0, nRows);
h_shiftedImage.convertTo(h_shiftedImage, CV_8UC1);
std::string ty = cv::typeToString(h_shiftedImage.type());
printf("Removed mean image: %s %dx%d \n", ty.c_str(), h_shiftedImage.rows, h_shiftedImage.cols);
cv::resize(h_shiftedImage, h_shiftedImage, cv::Size(nRows * cRows, nCols * cCols), cv::INTER_CUBIC);
cv::imshow("Dataset image", h_shiftedImage);
cv::waitKey(0);
count++;
}
#endif
/***********/
/* STEP #4 */
/***********/
cv::cuda::GpuMat d_A2;
d_A.copyTo(d_A2);
cv::cuda::multiply(d_A, 1.f / sqrt((float)numTrain), d_A);
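	// With this scaling, (d_A)^T (d_A) equals the sample covariance
	// (A^T A) / numTrain, so the squared singular values computed by the
	// SVD below are exactly the covariance eigenvalues.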
// --- Allocating SVD space on the device
float *d_U; cudaCHECK(hipMalloc(&d_U, d_A.cols * d_A.cols * sizeof(float)));
float *d_V; cudaCHECK(hipMalloc(&d_V, d_A.rows * d_A.rows * sizeof(float)));
float *d_S; cudaCHECK(hipMalloc(&d_S, min(d_A.rows, d_A.cols) * sizeof(float)));
float *rWork; cudaCHECK(hipMalloc(&rWork, 1 * sizeof(float)));
int *d_devInfo; cudaCHECK(hipMalloc(&d_devInfo, sizeof(int)));
// --- Compute SVD
hipsolverDnHandle_t cuSolverHandle;
cuSolverCHECK(hipsolverDnCreate(&cuSolverHandle));
int workSize = 0;
cuSolverCHECK(hipsolverDnSgesvd_bufferSize(cuSolverHandle, d_A.cols, d_A.rows, &workSize));
float *workArray; cudaCHECK(hipMalloc(&workArray, workSize * sizeof(float)));
cuSolverCHECK(hipsolverDnSgesvd(
cuSolverHandle,
'A',
'A',
d_A.cols,
d_A.rows,
(float *)d_A.data,
d_A.step1(),
d_S,
d_U,
d_A.cols,
d_V,
d_A.rows,
workArray,
workSize,
rWork,
d_devInfo));
int h_devInfo = 0;
cudaCHECK(hipMemcpy(&h_devInfo, d_devInfo, sizeof(int), hipMemcpyDeviceToHost));
if (h_devInfo == 0) printf("SVD converged \n");
else if (h_devInfo < 0) {
printf("%d-th parameter is wrong \n", -h_devInfo);
exit(1);
}
else {
printf("WARNING: h_devInfo = %d : SVD did not converge \n", h_devInfo);
}
/***********/
/* STEP #5 */
/***********/
cv::cuda::GpuMat d_Umat(d_A.cols, d_A.cols, CV_32FC1, d_U);
d_Umat(cv::Range(0, numEigenfaces), cv::Range(0, d_Umat.rows)).copyTo(d_Umat);
/***********/
/* STEP #6 */
/***********/
cv::cuda::GpuMat d_features(numEigenfaces, numTrain, CV_32FC1);
cv::cuda::gemm(d_Umat, d_A2, 1.f, d_features, 0.f, d_features, cv::GEMM_2_T);
/***********/
/* STEP #7 */
/***********/
// --- Load test image
std::stringstream ss;
ss << std::setw(3) << std::setfill('0') << 19;
std::string s = ss.str();
h_imageTemp = cv::imread(pathToData + "seanConneryTestImage.png", -1);
cv::transpose(h_imageTemp, h_imageTemp);
h_imageTemp.convertTo(h_imageTempCast, CV_32FC1);
h_imageTempCast = h_imageTempCast.reshape(0, 1);
std::string ty = cv::typeToString(h_imageTemp.type());
//ty = cv::typeToString(h_imageTemp.type());
printf("Loaded image: %s %dx%d \n", ty.c_str(), h_imageTemp.rows, h_imageTemp.cols);
ty = cv::typeToString(h_imageTempCast.type());
printf("Cast image: %s %dx%d \n", ty.c_str(), h_imageTempCast.rows, h_imageTempCast.cols);
cv::resize(h_imageTemp, h_imageTempResized, cv::Size(nRows * cRows, nCols * cCols), cv::INTER_CUBIC);
cv::transpose(h_imageTempResized, h_imageTempResized);
cv::imshow("Test image", h_imageTempResized);
cv::waitKey(0);
// --- Copy generic row
cv::cuda::GpuMat d_testImage(1, nRows * nCols, CV_32FC1);
rowPointer = d_testImage.ptr<float>(0);
cudaCHECK(hipMemcpy2D(rowPointer,
d_testImage.step * sizeof(float),
h_imageTempCast.ptr<float>(0),
h_imageTempCast.step * sizeof(float),
h_imageTempCast.cols * sizeof(float),
1,
hipMemcpyHostToDevice));
// --- Subtract the mean database image from the test image
cv::cuda::subtract(d_testImage, d_mean, d_testImage);
// --- Compute the feature vector of the test image
cv::cuda::GpuMat d_featureVec(numEigenfaces, 1, CV_32FC1);
cv::cuda::gemm(d_Umat, d_testImage, 1.f, d_featureVec, 0.f, d_featureVec, cv::GEMM_2_T);
/***********/
/* STEP #8 */
/***********/
cv::cuda::GpuMat d_temp(numEigenfaces, 1, CV_32FC1);
cv::cuda::GpuMat d_similarityScores(numTrain, 1, CV_32FC1);
for (int t = 0; t < numTrain; t++) {
d_features(cv::Range(0, numEigenfaces), cv::Range(t, t + 1)).copyTo(d_temp);
cv::cuda::subtract(d_temp, d_featureVec, d_temp);
cv::cuda::sqr(d_temp, d_temp);
cv::cuda::reduce(d_temp, d_similarityScores.row(t), 0, 0);
}
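	// d_similarityScores[t] is the squared Euclidean distance between the
	// test image's feature vector and training image t's feature vector;
	// the minimum found below therefore selects the nearest neighbor.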
double minVal, maxVal;
cv::Point minLoc, maxLoc;
cv::cuda::minMaxLoc(d_similarityScores, &minVal, &maxVal, &minLoc, &maxLoc);
std::cout << minVal << " " << maxVal << " " << minLoc << " " << maxLoc << "\n";
std::stringstream ss2;
ss2 << std::setw(3) << std::setfill('0') << minLoc.y;
std::string s2 = ss2.str();
std::cout << pathToData + s2 + ".png" << std::endl;
cv::Mat h_recognizedImage, h_recognizedImageResized;
h_recognizedImage = cv::imread(pathToData + s2 + ".png", -1);
cv::resize(h_recognizedImage, h_recognizedImageResized, cv::Size(nRows * cRows, nCols * cCols), cv::INTER_CUBIC);
cv::imshow("Recognized image", h_recognizedImageResized);
cv::waitKey(0);
}
| 1b8e24a579cda800ad1664bdbf6305c3bdb93644.cu | #include <opencv2/opencv.hpp>
#include <cusolverDn.h>
#include <cassert>
#include <iomanip>
#include <sstream>
//#define SHOW_DATASET
//#define SHOW_AVERAGE
//#define SHOW_SHIFTED_IMAGES
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
/******************/
/* ERROR CHECKING */
/******************/
#define cudaCHECK(ans) { checkAssert((ans), __FILE__, __LINE__); }
inline void checkAssert(cudaError_t errorCode, const char *file, int line, bool abort = true)
{
if (errorCode != cudaSuccess)
{
fprintf(stderr, "Check assert: %s %s %d\n", cudaGetErrorString(errorCode), file, line);
if (abort) exit(errorCode);
}
}
/***************************/
/* cuSOLVER ERROR CHECKING */
/***************************/
static const char *_cuSolverReturnErrorString(cusolverStatus_t errorCode)
{
switch (errorCode) {
case CUSOLVER_STATUS_SUCCESS: return "cuSolver successful call";
case CUSOLVER_STATUS_NOT_INITIALIZED: return "cuSolver is not initialized";
case CUSOLVER_STATUS_ALLOC_FAILED: return "cuSolver internal resource allocation failed";
case CUSOLVER_STATUS_INVALID_VALUE: return "cuSolver function has an unsupported value or parameter";
case CUSOLVER_STATUS_ARCH_MISMATCH: return "cuSolver function requires an unsupported architecture feature";
case CUSOLVER_STATUS_EXECUTION_FAILED: return "cuSolver function failed to execute";
case CUSOLVER_STATUS_INTERNAL_ERROR: return "cuSolver internal operation failed";
case CUSOLVER_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "Matrix type not supported";
}
return "<unknown>";
}
inline void __cuSolverCHECK(cusolverStatus_t errorCode, const char *file, const int line)
{
if (CUSOLVER_STATUS_SUCCESS != errorCode) {
fprintf(stderr, "cuSolver was unsuccessful in file '%s', line %d; the reported errorCodeor is: %s \nterminating!\n", __FILE__, __LINE__, \
_cuSolverReturnErrorString(errorCode)); \
assert(0); \
}
}
void cuSolverCHECK(cusolverStatus_t errorCode) { __cuSolverCHECK(errorCode, __FILE__, __LINE__); }
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b) { return (a % b != 0) ? (a / b + 1) : (a / b); }
/**********************/
/* REMOVE MEAN KERNEL */
/**********************/
__global__ void removeMeanKernel(const float * __restrict__ srcPtr, float * __restrict__ dstPtr, const size_t srcStep, const size_t dstStep, const int Nrows, const int Ncols) {
int rowIdx = blockIdx.y * blockDim.y + threadIdx.y;
int colIdx = blockIdx.x * blockDim.x + threadIdx.x;
if (rowIdx >= Nrows || colIdx >= Ncols) return;
const float *rowSrcPtr = (const float *)(((char *)srcPtr) + 0 * srcStep);
float *rowDstPtr = (float *)(((char *)dstPtr) + rowIdx * dstStep);
rowDstPtr[colIdx] = rowDstPtr[colIdx] - rowSrcPtr[colIdx];
}
/********/
/* MAIN */
/********/
int main() {
// --- Customized
const int numImages = 24;
const int nRows = 64;
const int nCols = 64;
const int numTrain = 24;
const int cRows = 10;
const int cCols = 10;
const int numEigenfaces = 7;
/***********/
/* STEP #1 */
/***********/
// --- Image path
std::string pathToData("D:\\Project\\Packt\\Eigenfaces\\Customized\\");
// --- GPU memory allocation
cv::cuda::GpuMat d_A(numTrain, nRows * nCols, CV_32FC1);
// --- Loading dataset images
cv::Mat h_imageTemp, h_imageTempCast, h_imageTempResized;
float *rowPointer;
for (int k = 0; k < numImages; k++) {
std::stringstream ss;
ss << std::setw(3) << std::setfill('0') << k;
std::string s = ss.str();
h_imageTemp = cv::imread(pathToData + s + ".png", -1);
cv::transpose(h_imageTemp, h_imageTemp);
h_imageTemp.convertTo(h_imageTempCast, CV_32FC1);
h_imageTempCast = h_imageTempCast.reshape(0, 1);
std::string ty = cv::typeToString(h_imageTemp.type());
printf("Loaded image: %s %dx%d \n", ty.c_str(), h_imageTemp.rows, h_imageTemp.cols);
ty = cv::typeToString(h_imageTempCast.type());
printf("Cast image: %s %dx%d \n", ty.c_str(), h_imageTempCast.rows, h_imageTempCast.cols);
#ifdef SHOW_DATASET
cv::resize(h_imageTemp, h_imageTempResized, cv::Size(nRows * cRows, nCols * cCols), cv::INTER_CUBIC);
cv::imshow("Dataset image", h_imageTempResized);
cv::waitKey(0);
#endif
// --- Copy generic row
rowPointer = d_A.ptr<float>(k);
cudaCHECK(cudaMemcpy2D(rowPointer,
d_A.step * sizeof(float),
h_imageTempCast.ptr<float>(0),
h_imageTempCast.step * sizeof(float),
h_imageTempCast.cols * sizeof(float),
1,
cudaMemcpyHostToDevice));
}
/***********/
/* STEP #2 */
/***********/
// --- Average
cv::cuda::GpuMat d_mean(1, nRows * nCols, CV_32FC1);
cv::cuda::reduce(d_A, d_mean, 0, 1);
#ifdef SHOW_AVERAGE
cv::Mat h_mean(d_mean);
h_mean = h_mean.reshape(0, nRows);
h_mean.convertTo(h_mean, CV_8UC1);
cv::resize(h_mean, h_mean, cv::Size(cRows * nRows, cCols * nCols), cv::INTER_CUBIC);
std::string ty = cv::typeToString(h_mean.type());
printf("Average image: %s %dx%d \n", ty.c_str(), h_mean.rows, h_mean.cols);
cv::imshow("Average image", h_mean);
cv::waitKey(0);
#endif
/***********/
/* STEP #3 */
/***********/
// --- Shift images
dim3 blockDim(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 gridDim(iDivUp(d_A.cols, BLOCKSIZE_X), iDivUp(d_A.rows, BLOCKSIZE_Y));
removeMeanKernel << <gridDim, blockDim >> > ((float *)d_mean.data, (float *)d_A.data, d_mean.step, d_A.step, numTrain, nRows * nCols);
cudaCHECK(cudaPeekAtLastError());
cudaCHECK(cudaDeviceSynchronize());
#ifdef SHOW_SHIFTED_IMAGES
	int count = 0;
for (int k = 0; k < numImages; k++) {
cv::Mat h_shiftedImage(1, nRows * nCols, CV_32FC1);
// --- Copy generic row
rowPointer = d_A.ptr<float>(count);
cudaCHECK(cudaMemcpy2D(h_shiftedImage.ptr<float>(0),
h_shiftedImage.step * sizeof(float),
rowPointer,
d_A.step * sizeof(float),
d_A.cols * sizeof(float),
1,
cudaMemcpyDeviceToHost));
h_shiftedImage = h_shiftedImage.reshape(0, nRows);
h_shiftedImage.convertTo(h_shiftedImage, CV_8UC1);
std::string ty = cv::typeToString(h_shiftedImage.type());
printf("Removed mean image: %s %dx%d \n", ty.c_str(), h_shiftedImage.rows, h_shiftedImage.cols);
cv::resize(h_shiftedImage, h_shiftedImage, cv::Size(nRows * cRows, nCols * cCols), cv::INTER_CUBIC);
cv::imshow("Dataset image", h_shiftedImage);
cv::waitKey(0);
count++;
}
#endif
/***********/
/* STEP #4 */
/***********/
cv::cuda::GpuMat d_A2;
d_A.copyTo(d_A2);
cv::cuda::multiply(d_A, 1.f / sqrt((float)numTrain), d_A);
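	// With this scaling, (d_A)^T (d_A) equals the sample covariance
	// (A^T A) / numTrain, so the squared singular values computed by the
	// SVD below are exactly the covariance eigenvalues.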
// --- Allocating SVD space on the device
float *d_U; cudaCHECK(cudaMalloc(&d_U, d_A.cols * d_A.cols * sizeof(float)));
float *d_V; cudaCHECK(cudaMalloc(&d_V, d_A.rows * d_A.rows * sizeof(float)));
float *d_S; cudaCHECK(cudaMalloc(&d_S, min(d_A.rows, d_A.cols) * sizeof(float)));
float *rWork; cudaCHECK(cudaMalloc(&rWork, 1 * sizeof(float)));
int *d_devInfo; cudaCHECK(cudaMalloc(&d_devInfo, sizeof(int)));
// --- Compute SVD
cusolverDnHandle_t cuSolverHandle;
cuSolverCHECK(cusolverDnCreate(&cuSolverHandle));
int workSize = 0;
cuSolverCHECK(cusolverDnSgesvd_bufferSize(cuSolverHandle, d_A.cols, d_A.rows, &workSize));
float *workArray; cudaCHECK(cudaMalloc(&workArray, workSize * sizeof(float)));
cuSolverCHECK(cusolverDnSgesvd(
cuSolverHandle,
'A',
'A',
d_A.cols,
d_A.rows,
(float *)d_A.data,
d_A.step1(),
d_S,
d_U,
d_A.cols,
d_V,
d_A.rows,
workArray,
workSize,
rWork,
d_devInfo));
int h_devInfo = 0;
cudaCHECK(cudaMemcpy(&h_devInfo, d_devInfo, sizeof(int), cudaMemcpyDeviceToHost));
if (h_devInfo == 0) printf("SVD converged \n");
else if (h_devInfo < 0) {
printf("%d-th parameter is wrong \n", -h_devInfo);
exit(1);
}
else {
printf("WARNING: h_devInfo = %d : SVD did not converge \n", h_devInfo);
}
/***********/
/* STEP #5 */
/***********/
cv::cuda::GpuMat d_Umat(d_A.cols, d_A.cols, CV_32FC1, d_U);
d_Umat(cv::Range(0, numEigenfaces), cv::Range(0, d_Umat.rows)).copyTo(d_Umat);
/***********/
/* STEP #6 */
/***********/
cv::cuda::GpuMat d_features(numEigenfaces, numTrain, CV_32FC1);
cv::cuda::gemm(d_Umat, d_A2, 1.f, d_features, 0.f, d_features, cv::GEMM_2_T);
/***********/
/* STEP #7 */
/***********/
// --- Load test image
std::stringstream ss;
ss << std::setw(3) << std::setfill('0') << 19;
std::string s = ss.str();
h_imageTemp = cv::imread(pathToData + "seanConneryTestImage.png", -1);
cv::transpose(h_imageTemp, h_imageTemp);
h_imageTemp.convertTo(h_imageTempCast, CV_32FC1);
h_imageTempCast = h_imageTempCast.reshape(0, 1);
std::string ty = cv::typeToString(h_imageTemp.type());
//ty = cv::typeToString(h_imageTemp.type());
printf("Loaded image: %s %dx%d \n", ty.c_str(), h_imageTemp.rows, h_imageTemp.cols);
ty = cv::typeToString(h_imageTempCast.type());
printf("Cast image: %s %dx%d \n", ty.c_str(), h_imageTempCast.rows, h_imageTempCast.cols);
cv::resize(h_imageTemp, h_imageTempResized, cv::Size(nRows * cRows, nCols * cCols), cv::INTER_CUBIC);
cv::transpose(h_imageTempResized, h_imageTempResized);
cv::imshow("Test image", h_imageTempResized);
cv::waitKey(0);
// --- Copy generic row
cv::cuda::GpuMat d_testImage(1, nRows * nCols, CV_32FC1);
rowPointer = d_testImage.ptr<float>(0);
cudaCHECK(cudaMemcpy2D(rowPointer,
d_testImage.step * sizeof(float),
h_imageTempCast.ptr<float>(0),
h_imageTempCast.step * sizeof(float),
h_imageTempCast.cols * sizeof(float),
1,
cudaMemcpyHostToDevice));
// --- Subtract the mean database image from the test image
cv::cuda::subtract(d_testImage, d_mean, d_testImage);
// --- Compute the feature vector of the test image
cv::cuda::GpuMat d_featureVec(numEigenfaces, 1, CV_32FC1);
cv::cuda::gemm(d_Umat, d_testImage, 1.f, d_featureVec, 0.f, d_featureVec, cv::GEMM_2_T);
/***********/
/* STEP #8 */
/***********/
cv::cuda::GpuMat d_temp(numEigenfaces, 1, CV_32FC1);
cv::cuda::GpuMat d_similarityScores(numTrain, 1, CV_32FC1);
for (int t = 0; t < numTrain; t++) {
d_features(cv::Range(0, numEigenfaces), cv::Range(t, t + 1)).copyTo(d_temp);
cv::cuda::subtract(d_temp, d_featureVec, d_temp);
cv::cuda::sqr(d_temp, d_temp);
cv::cuda::reduce(d_temp, d_similarityScores.row(t), 0, 0);
}
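	// d_similarityScores[t] is the squared Euclidean distance between the
	// test image's feature vector and training image t's feature vector;
	// the minimum found below therefore selects the nearest neighbor.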
double minVal, maxVal;
cv::Point minLoc, maxLoc;
cv::cuda::minMaxLoc(d_similarityScores, &minVal, &maxVal, &minLoc, &maxLoc);
std::cout << minVal << " " << maxVal << " " << minLoc << " " << maxLoc << "\n";
std::stringstream ss2;
ss2 << std::setw(3) << std::setfill('0') << minLoc.y;
std::string s2 = ss2.str();
std::cout << pathToData + s2 + ".png" << std::endl;
cv::Mat h_recognizedImage, h_recognizedImageResized;
h_recognizedImage = cv::imread(pathToData + s2 + ".png", -1);
cv::resize(h_recognizedImage, h_recognizedImageResized, cv::Size(nRows * cRows, nCols * cCols), cv::INTER_CUBIC);
cv::imshow("Recognized image", h_recognizedImageResized);
cv::waitKey(0);
}
|
8507eebf9b2e8faf810f3d47582c97a86d70f8d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#include "files.h"
#define SOFTENING 1e-9f
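/*
 * SOFTENING keeps the 1/r^3 term finite when two bodies (nearly)
 * coincide, since distSqr can then never reach zero.
 */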
/*
* Each body contains x, y, and z coordinate positions,
* as well as velocities in the x, y, and z directions.
*/
typedef struct {
float x, y, z, vx, vy, vz;
} Body;
/*
* Calculate the gravitational impact of all bodies in the system
* on all others.
*/
int *max_count;
__global__
void bodyForce(Body *p, float dt, int n, int *max) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
//for (int i = 0; i < n; ++i) {
if (i < n) {
float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
for (int j = 0; j < n; j++) {
float dx = p[j].x - p[i].x;
float dy = p[j].y - p[i].y;
float dz = p[j].z - p[i].z;
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
Fx += dx * invDist3;
Fy += dy * invDist3;
Fz += dz * invDist3;
}
p[i].vx += dt*Fx;
p[i].vy += dt*Fy;
p[i].vz += dt*Fz;
    atomicAdd(max, 1); // atomic: many threads update this shared counter concurrently
}
}
__global__
void integrate(Body *p, float dt, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
p[i].x += p[i].vx*dt;
p[i].y += p[i].vy*dt;
p[i].z += p[i].vz*dt;
}
}
int main(const int argc, const char** argv) {
int deviceId;
hipGetDevice(&deviceId);
hipError_t err, sync_err, async_err;
  // The assessment will test against both 2<<11 and 2<<15.
  // Feel free to pass the command line argument 15 when you generate ./nbody report files
int nBodies = 2<<11;
if (argc > 1) nBodies = 2<<atoi(argv[1]);
// The assessment will pass hidden initialized values to check for correctness.
// You should not make changes to these files, or else the assessment will not work.
const char * initialized_values;
const char * solution_values;
if (nBodies == 2<<11) {
initialized_values = "files/initialized_4096";
solution_values = "files/solution_4096";
} else { // nBodies == 2<<15
initialized_values = "files/initialized_65536";
solution_values = "files/solution_65536";
}
if (argc > 2) initialized_values = argv[2];
if (argc > 3) solution_values = argv[3];
const float dt = 0.01f; // Time step
const int nIters = 10; // Simulation iterations
int bytes = nBodies * sizeof(Body);
float *buf;
//buf = (float *)malloc(bytes);
err = hipMallocManaged(&buf, bytes);
if (err != hipSuccess)
{
printf("cuda Malloc failed: %s\n", hipGetErrorString(err));
return -1;
}
hipMallocManaged(&max_count, sizeof(int));
*max_count = 0;
Body *p = (Body*)buf;
hipMemPrefetchAsync(buf, bytes, hipCpuDeviceId);
read_values_from_file(initialized_values, buf, bytes);
double totalTime = 0.0;
/*
* This simulation will run for 10 cycles of time, calculating gravitational
* interaction amongst bodies, and adjusting their positions to reflect.
*/
for (int iter = 0; iter < nIters; iter++) {
StartTimer();
int threads_per_block = 128;
int number_of_blocks = (nBodies / threads_per_block);
hipMemPrefetchAsync(buf, bytes, deviceId);
hipLaunchKernelGGL(( bodyForce) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, p, dt, nBodies, max_count );
sync_err = hipGetLastError();
async_err = hipDeviceSynchronize();
if (sync_err != hipSuccess || async_err != hipSuccess) {
printf("Sync error: %s\n", hipGetErrorString(sync_err));
printf("Async error: %s\n", hipGetErrorString(async_err));
}
/*
* This position integration cannot occur until this round of `bodyForce` has completed.
* Also, the next round of `bodyForce` cannot begin until the integration is complete.
*/
hipLaunchKernelGGL(( integrate) , dim3(number_of_blocks), dim3(threads_per_block) , 0, 0, p, dt, nBodies );
sync_err = hipGetLastError();
async_err = hipDeviceSynchronize();
if (sync_err != hipSuccess || async_err != hipSuccess) {
printf("i-Sync error: %s\n", hipGetErrorString(sync_err));
printf("i-Async error: %s\n", hipGetErrorString(async_err));
}
const double tElapsed = GetTimer() / 1000.0;
totalTime += tElapsed;
}
hipMemPrefetchAsync(buf, bytes, hipCpuDeviceId);
double avgTime = totalTime / (double)(nIters);
float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime;
write_values_to_file(solution_values, buf, bytes);
// You will likely enjoy watching this value grow as you accelerate the application,
// but beware that a failure to correctly synchronize the device might result in
// unrealistically high values.
printf("%0.3f Billion Interactions / second", billionsOfOpsPerSecond);
printf("\n nBodies: %d, Max Count: %d\n", nBodies, *max_count);
hipFree(max_count);
hipFree(buf);
}
// Standalone compilation: nvcc -std=c++11 21-nbody.cu
| 8507eebf9b2e8faf810f3d47582c97a86d70f8d3.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#include "files.h"
#define SOFTENING 1e-9f
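/*
 * SOFTENING keeps the 1/r^3 term finite when two bodies (nearly)
 * coincide, since distSqr can then never reach zero.
 */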
/*
* Each body contains x, y, and z coordinate positions,
* as well as velocities in the x, y, and z directions.
*/
typedef struct {
float x, y, z, vx, vy, vz;
} Body;
/*
* Calculate the gravitational impact of all bodies in the system
* on all others.
*/
int *max_count;
__global__
void bodyForce(Body *p, float dt, int n, int *max) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
//for (int i = 0; i < n; ++i) {
if (i < n) {
float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f;
for (int j = 0; j < n; j++) {
float dx = p[j].x - p[i].x;
float dy = p[j].y - p[i].y;
float dz = p[j].z - p[i].z;
float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING;
float invDist = rsqrtf(distSqr);
float invDist3 = invDist * invDist * invDist;
Fx += dx * invDist3;
Fy += dy * invDist3;
Fz += dz * invDist3;
}
p[i].vx += dt*Fx;
p[i].vy += dt*Fy;
p[i].vz += dt*Fz;
    atomicAdd(max, 1); // atomic: many threads update this shared counter concurrently
}
}
__global__
void integrate(Body *p, float dt, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
p[i].x += p[i].vx*dt;
p[i].y += p[i].vy*dt;
p[i].z += p[i].vz*dt;
}
}
int main(const int argc, const char** argv) {
int deviceId;
cudaGetDevice(&deviceId);
cudaError_t err, sync_err, async_err;
  // The assessment will test against both 2<<11 and 2<<15.
  // Feel free to pass the command line argument 15 when you generate ./nbody report files
int nBodies = 2<<11;
if (argc > 1) nBodies = 2<<atoi(argv[1]);
// The assessment will pass hidden initialized values to check for correctness.
// You should not make changes to these files, or else the assessment will not work.
const char * initialized_values;
const char * solution_values;
if (nBodies == 2<<11) {
initialized_values = "files/initialized_4096";
solution_values = "files/solution_4096";
} else { // nBodies == 2<<15
initialized_values = "files/initialized_65536";
solution_values = "files/solution_65536";
}
if (argc > 2) initialized_values = argv[2];
if (argc > 3) solution_values = argv[3];
const float dt = 0.01f; // Time step
const int nIters = 10; // Simulation iterations
int bytes = nBodies * sizeof(Body);
float *buf;
//buf = (float *)malloc(bytes);
err = cudaMallocManaged(&buf, bytes);
if (err != cudaSuccess)
{
printf("cuda Malloc failed: %s\n", cudaGetErrorString(err));
return -1;
}
cudaMallocManaged(&max_count, sizeof(int));
*max_count = 0;
Body *p = (Body*)buf;
cudaMemPrefetchAsync(buf, bytes, cudaCpuDeviceId);
read_values_from_file(initialized_values, buf, bytes);
double totalTime = 0.0;
/*
* This simulation will run for 10 cycles of time, calculating gravitational
* interaction amongst bodies, and adjusting their positions to reflect.
*/
for (int iter = 0; iter < nIters; iter++) {
StartTimer();
int threads_per_block = 128;
int number_of_blocks = (nBodies / threads_per_block);
cudaMemPrefetchAsync(buf, bytes, deviceId);
bodyForce <<< number_of_blocks, threads_per_block >>> ( p, dt, nBodies, max_count );
sync_err = cudaGetLastError();
async_err = cudaDeviceSynchronize();
if (sync_err != cudaSuccess || async_err != cudaSuccess) {
printf("Sync error: %s\n", cudaGetErrorString(sync_err));
printf("Async error: %s\n", cudaGetErrorString(async_err));
}
/*
* This position integration cannot occur until this round of `bodyForce` has completed.
* Also, the next round of `bodyForce` cannot begin until the integration is complete.
*/
integrate <<< number_of_blocks, threads_per_block >>> ( p, dt, nBodies );
sync_err = cudaGetLastError();
async_err = cudaDeviceSynchronize();
if (sync_err != cudaSuccess || async_err != cudaSuccess) {
printf("i-Sync error: %s\n", cudaGetErrorString(sync_err));
printf("i-Async error: %s\n", cudaGetErrorString(async_err));
}
const double tElapsed = GetTimer() / 1000.0;
totalTime += tElapsed;
}
cudaMemPrefetchAsync(buf, bytes, cudaCpuDeviceId);
double avgTime = totalTime / (double)(nIters);
float billionsOfOpsPerSecond = 1e-9 * nBodies * nBodies / avgTime;
write_values_to_file(solution_values, buf, bytes);
// You will likely enjoy watching this value grow as you accelerate the application,
// but beware that a failure to correctly synchronize the device might result in
// unrealistically high values.
printf("%0.3f Billion Interactions / second", billionsOfOpsPerSecond);
printf("\n nBodies: %d, Max Count: %d\n", nBodies, *max_count);
cudaFree(max_count);
cudaFree(buf);
}
// Standalone compilation: nvcc -std=c++11 21-nbody.cu
|
17db819d376c3138ab39e1782b695128b1dbf6fa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ComputeOffsetOfMatrixB.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
	if (argc < 2) { printf("Usage: %s <matrix_count>\n", argv[0]); return 1; }
	char* p; int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
		const int32_t *row_sum = NULL;
		hipMalloc(&row_sum, XSIZE*YSIZE*sizeof(int32_t)); // hipMalloc takes a size in bytes: N int32_t elements
		int32_t *output = NULL;
		hipMalloc(&output, XSIZE*YSIZE*sizeof(int32_t)); // size in bytes, matching the N elements written
int32_t N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
		hipFree(0);
		hipLaunchKernelGGL(ComputeOffsetOfMatrixB, dim3(gridBlock), dim3(threadBlock), 0, 0, row_sum, output, N);
		hipDeviceSynchronize();
		// warm-up launches
		for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
			hipLaunchKernelGGL(ComputeOffsetOfMatrixB, dim3(gridBlock), dim3(threadBlock), 0, 0, row_sum, output, N);
		}
		hipDeviceSynchronize();
		auto start = steady_clock::now();
		for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
			hipLaunchKernelGGL(ComputeOffsetOfMatrixB, dim3(gridBlock), dim3(threadBlock), 0, 0, row_sum, output, N);
		}
		hipDeviceSynchronize(); // kernel launches are asynchronous; wait for completion before reading the clock
		auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 17db819d376c3138ab39e1782b695128b1dbf6fa.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ComputeOffsetOfMatrixB.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
		const int32_t *row_sum = NULL;
		cudaMalloc(&row_sum, XSIZE*YSIZE*sizeof(int32_t)); // cudaMalloc takes a size in bytes: N int32_t elements
		int32_t *output = NULL;
		cudaMalloc(&output, XSIZE*YSIZE*sizeof(int32_t)); // size in bytes, matching the N elements written
int32_t N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ComputeOffsetOfMatrixB<<<gridBlock,threadBlock>>>(row_sum,output,N);
cudaDeviceSynchronize();
		// warm-up launches
		for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
			ComputeOffsetOfMatrixB<<<gridBlock,threadBlock>>>(row_sum,output,N);
		}
		cudaDeviceSynchronize();
		auto start = steady_clock::now();
		for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
			ComputeOffsetOfMatrixB<<<gridBlock,threadBlock>>>(row_sum,output,N);
		}
		cudaDeviceSynchronize(); // kernel launches are asynchronous; wait for completion before reading the clock
		auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ba67695ba69da415f35eb986d8341d65eaea1b31.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "floyd2DKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
	if (argc < 2) { printf("Usage: %s <matrix_count>\n", argv[0]); return 1; }
	char* p; int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *M = NULL;
		hipMalloc(&M, XSIZE*YSIZE*sizeof(int)); // hipMalloc takes a size in bytes
const int nverts = 1;
const int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
floyd2DKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, M,nverts,k);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((floyd2DKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, M, nverts, k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((floyd2DKernel), dim3(gridBlock), dim3(threadBlock), 0, 0, M, nverts, k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ba67695ba69da415f35eb986d8341d65eaea1b31.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "floyd2DKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *M = NULL;
cudaMalloc(&M, XSIZE*YSIZE);
const int nverts = 1;
const int k = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
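// Pad the launch extents up to multiples of the block dimensions before deriving the grid.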
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
floyd2DKernel<<<gridBlock,threadBlock>>>(M,nverts,k);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
floyd2DKernel<<<gridBlock,threadBlock>>>(M,nverts,k);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
floyd2DKernel<<<gridBlock,threadBlock>>>(M,nverts,k);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
44eb9fd3ddb39de6aec3220eea213c2ff97a1107.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* The MIT License (MIT)
* Copyright (c) 2021, NVIDIA CORPORATION.
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
#define CUB_NS_PREFIX namespace kaolin {
#define CUB_NS_POSTFIX }
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include "SPC.h"
#include <hipcub/hipcub.hpp>
using namespace torch::indexing;
extern inline void cudaPrintError(const char* file, const int line);
#define CUDA_PRINT_ERROR() cudaPrintError(__FILE__, __LINE__)
SPC::SPC()
{}
SPC::~SPC()
{}
void SPC::SaveNPZ(std::string filename)
{
torch::Tensor OctreeCPU = m_Octree.to(torch::kCPU);
uint m_Osize = GetOSize();
cnpy::npz_save(filename, "octree", reinterpret_cast<uchar*>(OctreeCPU.data_ptr<uchar>()), { m_Osize }, "w");
}
void SPC::LoadNPZ(std::string filename)
{
cnpy::npz_t F = cnpy::npz_load(filename);
cnpy::NpyArray O = F["octree"];
uchar* octree = O.data<uchar>();
m_Osize = O.num_vals;
m_Octree = torch::zeros({m_Osize}, torch::device(torch::kCUDA).dtype(torch::kByte));
uchar* octreeT = reinterpret_cast<uchar*>(m_Octree.data_ptr<uchar>());
hipMemcpy(octreeT, octree, m_Osize, hipMemcpyHostToDevice);
std::vector<at::Tensor> tmp;
tmp = SetGeometry(m_Octree);
m_Points = tmp[0];
m_Pyramid = tmp[1];
uint* h_pyramid = reinterpret_cast<uint*>(m_Pyramid.data_ptr<int>());
m_Level = m_Pyramid.size(1) - 2;
m_Psize = h_pyramid[m_Level];
}
__global__ void MortonToPoint(
const uint Psize,
morton_code *DataIn,
point_data *DataOut)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < Psize)
DataOut[tidx] = ToPoint(DataIn[tidx]);
}
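// NodesToMorton: expand each set child bit of a node into a child Morton code (8*code + child),
// written at the offset given by the exclusive prefix sum.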
__global__ void NodesToMorton(
const uint Psize,
const uchar *d_Odata,
const uint * d_PrefixSum,
const morton_code *d_MdataIn,
morton_code *d_MdataOut)
{
uint tidx = blockIdx.x * 1024 + threadIdx.x;
if (tidx < Psize)
{
uchar bits = d_Odata[tidx];
morton_code code = d_MdataIn[tidx];
int addr = d_PrefixSum[tidx];
for (int i = 7; i >= 0; i--)
if (bits&(0x1 << i))
d_MdataOut[addr--] = 8 * code + i;
}
}
point_data* SPC::GetProotGPU(uint l)
{
point_data* Pdata = reinterpret_cast<point_data*>(m_Points.data_ptr<short>());
uint offset = 0;
auto pyramid_a = m_Pyramid.accessor<int, 2>();
offset = pyramid_a[1][l];
return Pdata + offset;
}
torch::Tensor SPC::GetPoints(uint l)
{
auto pyramid_a = m_Pyramid.accessor<int, 2>();
uint offset = pyramid_a[1][l];
return m_Points.index({Slice(offset, None, None)});
}
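// d_ScanNodes: popcount of each octree byte gives that node's child count; feeds the exclusive prefix sum.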
__global__ void
d_ScanNodes(
const uint numBytes,
const uchar *d_octree,
uint *d_Info)
{
uint tidx = blockIdx.x * 1024 + threadIdx.x;
if (tidx < numBytes)
d_Info[tidx] = __popc(d_octree[tidx]);
}
std::vector<at::Tensor> SPC::SetGeometry(torch::Tensor Octree)
{
// CHECK_INPUT(odata);
uchar* Odata = Octree.data_ptr<uchar>();
m_Osize = Octree.size(0);
m_Info = torch::zeros({m_Osize+1}, torch::device(torch::kCUDA).dtype(torch::kInt32));
m_PrefixSum = torch::zeros({m_Osize+1}, torch::device(torch::kCUDA).dtype(torch::kInt32));
torch::Tensor Pyramid = torch::zeros({2, MAX_LEVELS+2}, torch::device(torch::kCPU).dtype(torch::kInt32));
uint* d_Info = reinterpret_cast<uint*>(m_Info.data_ptr<int>());
uint* d_PrefixSum = reinterpret_cast<uint*>(m_PrefixSum.data_ptr<int>());
int* h_Pyramid = Pyramid.data_ptr<int>();
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
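// First call with a NULL workspace only queries the required temp-storage size.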
kaolin::hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_Info, d_PrefixSum, m_Osize+1);
torch::Tensor temp_storage = torch::zeros({(long)temp_storage_bytes}, torch::device(torch::kCUDA).dtype(torch::kByte));
d_temp_storage = (void*)temp_storage.data_ptr<uchar>();
// compute exclusive sum 1 element beyond end of list to get inclusive sum starting at d_PrefixSum+1
d_ScanNodes << < (m_Osize + 1023) / 1024, 1024 >> >(m_Osize, Odata, d_Info);
kaolin::hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_Info, d_PrefixSum, m_Osize+1); // careful with the +1
uint psize;
hipMemcpy(&psize, d_PrefixSum+m_Osize, sizeof(uint), hipMemcpyDeviceToHost);
psize++; //plus one for root
torch::Tensor Points = torch::zeros({psize, 4}, torch::device(torch::kCUDA).dtype(torch::kInt16));
point_data* Pdata = reinterpret_cast<point_data*>(Points.data_ptr<short>());
//TODO: share this memory with Points
torch::Tensor Mortons = torch::zeros({psize}, torch::device(torch::kCUDA).dtype(torch::kInt64));
morton_code* Mdata = reinterpret_cast<morton_code*>(Mortons.data_ptr<int64_t>());
int* pyramid = h_Pyramid;
int* pyramidSum = h_Pyramid + MAX_LEVELS + 2;
uint* S = d_PrefixSum + 1; // this shouldn't matter
morton_code* M = Mdata;
uchar* O = Odata;
morton_code m0 = 0;
hipMemcpy(M, &m0, sizeof(morton_code), hipMemcpyHostToDevice);
int Lsize = 1;
uint currSum, prevSum = 0;
uint sum = pyramid[0] = Lsize;
pyramidSum[0] = 0;
pyramidSum[1] = sum;
int Level = 0;
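// Breadth-first walk over the octree levels: each pass expands one level's child Morton codes
// and records the level size in the pyramid.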
while (sum <= m_Osize)
{
NodesToMorton << <(Lsize + 1023) / 1024, 1024 >> >(Lsize, O, S, M, Mdata);
O += Lsize;
S += Lsize;
M += Lsize;
hipMemcpy(&currSum, d_PrefixSum + prevSum + 1, sizeof(uint), hipMemcpyDeviceToHost);
Lsize = currSum - prevSum;
prevSum = currSum;
pyramid[++Level] = Lsize;
sum += Lsize;
pyramidSum[Level+1] = sum;
}
uint totalPoints = pyramidSum[Level+1];
MortonToPoint << <(totalPoints + 1023) / 1024, 1024 >> >(totalPoints, Mdata, Pdata);
hipGetLastError();
// assemble output tensors
std::vector<at::Tensor> result;
result.push_back(Points);
result.push_back(Pyramid.index({Slice(None), Slice(None, Level+2)}).contiguous());
hipDeviceSynchronize();
hipError_t err = hipGetLastError();
if ( err != hipSuccess )
{
printf("CUDA Error: %s\n", hipGetErrorString(err));
}
return result;
}
| 44eb9fd3ddb39de6aec3220eea213c2ff97a1107.cu | /******************************************************************************
* The MIT License (MIT)
* Copyright (c) 2021, NVIDIA CORPORATION.
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
******************************************************************************/
#define CUB_NS_PREFIX namespace kaolin {
#define CUB_NS_POSTFIX }
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include "SPC.h"
#include <cub/device/device_scan.cuh>
using namespace torch::indexing;
extern inline void cudaPrintError(const char* file, const int line);
#define CUDA_PRINT_ERROR() cudaPrintError(__FILE__, __LINE__)
SPC::SPC()
{}
SPC::~SPC()
{}
void SPC::SaveNPZ(std::string filename)
{
torch::Tensor OctreeCPU = m_Octree.to(torch::kCPU);
uint m_Osize = GetOSize();
cnpy::npz_save(filename, "octree", reinterpret_cast<uchar*>(OctreeCPU.data_ptr<uchar>()), { m_Osize }, "w");
}
void SPC::LoadNPZ(std::string filename)
{
cnpy::npz_t F = cnpy::npz_load(filename);
cnpy::NpyArray O = F["octree"];
uchar* octree = O.data<uchar>();
m_Osize = O.num_vals;
m_Octree = torch::zeros({m_Osize}, torch::device(torch::kCUDA).dtype(torch::kByte));
uchar* octreeT = reinterpret_cast<uchar*>(m_Octree.data_ptr<uchar>());
cudaMemcpy(octreeT, octree, m_Osize, cudaMemcpyHostToDevice);
std::vector<at::Tensor> tmp;
tmp = SetGeometry(m_Octree);
m_Points = tmp[0];
m_Pyramid = tmp[1];
uint* h_pyramid = reinterpret_cast<uint*>(m_Pyramid.data_ptr<int>());
m_Level = m_Pyramid.size(1) - 2;
m_Psize = h_pyramid[m_Level];
}
__global__ void MortonToPoint(
const uint Psize,
morton_code *DataIn,
point_data *DataOut)
{
uint tidx = blockDim.x * blockIdx.x + threadIdx.x;
if (tidx < Psize)
DataOut[tidx] = ToPoint(DataIn[tidx]);
}
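// NodesToMorton: expand each set child bit of a node into a child Morton code (8*code + child),
// written at the offset given by the exclusive prefix sum.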
__global__ void NodesToMorton(
const uint Psize,
const uchar *d_Odata,
const uint * d_PrefixSum,
const morton_code *d_MdataIn,
morton_code *d_MdataOut)
{
uint tidx = blockIdx.x * 1024 + threadIdx.x;
if (tidx < Psize)
{
uchar bits = d_Odata[tidx];
morton_code code = d_MdataIn[tidx];
int addr = d_PrefixSum[tidx];
for (int i = 7; i >= 0; i--)
if (bits&(0x1 << i))
d_MdataOut[addr--] = 8 * code + i;
}
}
point_data* SPC::GetProotGPU(uint l)
{
point_data* Pdata = reinterpret_cast<point_data*>(m_Points.data_ptr<short>());
uint offset = 0;
auto pyramid_a = m_Pyramid.accessor<int, 2>();
offset = pyramid_a[1][l];
return Pdata + offset;
}
torch::Tensor SPC::GetPoints(uint l)
{
auto pyramid_a = m_Pyramid.accessor<int, 2>();
uint offset = pyramid_a[1][l];
return m_Points.index({Slice(offset, None, None)});
}
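// d_ScanNodes: popcount of each octree byte gives that node's child count; feeds the exclusive prefix sum.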
__global__ void
d_ScanNodes(
const uint numBytes,
const uchar *d_octree,
uint *d_Info)
{
uint tidx = blockIdx.x * 1024 + threadIdx.x;
if (tidx < numBytes)
d_Info[tidx] = __popc(d_octree[tidx]);
}
std::vector<at::Tensor> SPC::SetGeometry(torch::Tensor Octree)
{
// CHECK_INPUT(odata);
uchar* Odata = Octree.data_ptr<uchar>();
m_Osize = Octree.size(0);
m_Info = torch::zeros({m_Osize+1}, torch::device(torch::kCUDA).dtype(torch::kInt32));
m_PrefixSum = torch::zeros({m_Osize+1}, torch::device(torch::kCUDA).dtype(torch::kInt32));
torch::Tensor Pyramid = torch::zeros({2, MAX_LEVELS+2}, torch::device(torch::kCPU).dtype(torch::kInt32));
uint* d_Info = reinterpret_cast<uint*>(m_Info.data_ptr<int>());
uint* d_PrefixSum = reinterpret_cast<uint*>(m_PrefixSum.data_ptr<int>());
int* h_Pyramid = Pyramid.data_ptr<int>();
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
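// First call with a NULL workspace only queries the required temp-storage size.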
kaolin::cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_Info, d_PrefixSum, m_Osize+1);
torch::Tensor temp_storage = torch::zeros({(long)temp_storage_bytes}, torch::device(torch::kCUDA).dtype(torch::kByte));
d_temp_storage = (void*)temp_storage.data_ptr<uchar>();
// compute exclusive sum 1 element beyond end of list to get inclusive sum starting at d_PrefixSum+1
d_ScanNodes << < (m_Osize + 1023) / 1024, 1024 >> >(m_Osize, Odata, d_Info);
kaolin::cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_Info, d_PrefixSum, m_Osize+1); // careful with the +1
uint psize;
cudaMemcpy(&psize, d_PrefixSum+m_Osize, sizeof(uint), cudaMemcpyDeviceToHost);
psize++; //plus one for root
torch::Tensor Points = torch::zeros({psize, 4}, torch::device(torch::kCUDA).dtype(torch::kInt16));
point_data* Pdata = reinterpret_cast<point_data*>(Points.data_ptr<short>());
//TODO: share this memory with Points
torch::Tensor Mortons = torch::zeros({psize}, torch::device(torch::kCUDA).dtype(torch::kInt64));
morton_code* Mdata = reinterpret_cast<morton_code*>(Mortons.data_ptr<int64_t>());
int* pyramid = h_Pyramid;
int* pyramidSum = h_Pyramid + MAX_LEVELS + 2;
uint* S = d_PrefixSum + 1; // this shouldn't matter
morton_code* M = Mdata;
uchar* O = Odata;
morton_code m0 = 0;
cudaMemcpy(M, &m0, sizeof(morton_code), cudaMemcpyHostToDevice);
int Lsize = 1;
uint currSum, prevSum = 0;
uint sum = pyramid[0] = Lsize;
pyramidSum[0] = 0;
pyramidSum[1] = sum;
int Level = 0;
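// Breadth-first walk over the octree levels: each pass expands one level's child Morton codes
// and records the level size in the pyramid.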
while (sum <= m_Osize)
{
NodesToMorton << <(Lsize + 1023) / 1024, 1024 >> >(Lsize, O, S, M, Mdata);
O += Lsize;
S += Lsize;
M += Lsize;
cudaMemcpy(&currSum, d_PrefixSum + prevSum + 1, sizeof(uint), cudaMemcpyDeviceToHost);
Lsize = currSum - prevSum;
prevSum = currSum;
pyramid[++Level] = Lsize;
sum += Lsize;
pyramidSum[Level+1] = sum;
}
uint totalPoints = pyramidSum[Level+1];
MortonToPoint << <(totalPoints + 1023) / 1024, 1024 >> >(totalPoints, Mdata, Pdata);
cudaGetLastError();
// assemble output tensors
std::vector<at::Tensor> result;
result.push_back(Points);
result.push_back(Pyramid.index({Slice(None), Slice(None, Level+2)}).contiguous());
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError();
if ( err != cudaSuccess )
{
printf("CUDA Error: %s\n", cudaGetErrorString(err));
}
return result;
}
|
6c4d3b60c4f30eaf66d364bcc8334de10086d90a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_add(float *proj1, float *proj, int iv, int na, int nb, float weight){
int ia = 16 * blockIdx.x + threadIdx.x;
int ib = 16 * blockIdx.y + threadIdx.y;
if (ia >= na || ib >= nb)
return;
proj1[ia + ib * na] += proj[ia + ib * na + iv * na * nb] * weight;
}
// __global__ void kernel_add(hipArray *proj1, hipArray *proj, int iv, int na, int nb, float weight){
// int ia = 16 * blockIdx.x + threadIdx.x;
// int ib = 16 * blockIdx.y + threadIdx.y;
// if (ia >= na || ib >= nb)
// return;
// proj1[ia + ib * na] += proj[ia + ib * na + iv * na * nb] * weight;
// }
| 6c4d3b60c4f30eaf66d364bcc8334de10086d90a.cu | __global__ void kernel_add(float *proj1, float *proj, int iv, int na, int nb, float weight){
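// Accumulate view iv of the 3D projection stack into the 2D buffer proj1, scaled by weight.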
int ia = 16 * blockIdx.x + threadIdx.x;
int ib = 16 * blockIdx.y + threadIdx.y;
if (ia >= na || ib >= nb)
return;
proj1[ia + ib * na] += proj[ia + ib * na + iv * na * nb] * weight;
}
// __global__ void kernel_add(cudaArray *proj1, cudaArray *proj, int iv, int na, int nb, float weight){
// int ia = 16 * blockIdx.x + threadIdx.x;
// int ib = 16 * blockIdx.y + threadIdx.y;
// if (ia >= na || ib >= nb)
// return;
// proj1[ia + ib * na] += proj[ia + ib * na + iv * na * nb] * weight;
// }
|
685e7b6b731ca07c62c0c0f91f895e7d7b0cec19.hip | // !!! This is a file automatically generated by hipify!!!
#include "assert.h"
#include "gpuErrorCheck.h"
#include "rsvd.h"
#include <vector>
#include <iostream>
#include <hip/hip_runtime.h>
using namespace std;
/* Tuned settings for float and double */
/*
#define FLOAT
#define real_t float
#define BLK_WIDTH 16
#define BLK_HEIGHT 128
#define NUM_THREADS 128
#define BLK_SIZE (BLK_WIDTH * BLK_HEIGHT)
#define BLK_ROWS (NUM_THREADS / BLK_WIDTH)
#define THREAD_STORAGE BLK_WIDTH
#define TID_L_MASK 0xF
#define TID_U_SHIFT 4
#define ZERO_INIT {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
*/
#define DOUBLE
#define real_t double
#define BLK_WIDTH 8
#define BLK_HEIGHT 64
#define NUM_THREADS 64
#define BLK_SIZE (BLK_WIDTH * BLK_HEIGHT)
#define BLK_ROWS (NUM_THREADS / BLK_WIDTH)
#define THREAD_STORAGE BLK_WIDTH
#define TID_L_MASK 0x7
#define TID_U_SHIFT 3
#define ZERO_INIT {0,0,0,0,0,0,0,0}
/******************************************* class ****************************************************/
class level_t {
public:
int num_blocks;
int aggregate_blocks;
int block_offset;
int lev;
level_t();
level_t(int nb, int ab, int bo, int l);
};
/* This defines the main matrix data structure and functions to
access and modify it */
class QR_matrix {
public:
/* Input matrix properties */
int m;
int n;
int lda;
int ld_panel_size;
int ldq;
int ldq_panel;
int blks_tall_total;
int blks_wide_total;
int internal_matrix_size;
int total_blocks;
real_t * mat_base;
real_t * Q_base;
/* Current information */
real_t * mat_cur;
real_t * Q;
int m_current;
int n_current;
int blks_tall_cur;
int blks_wide_cur;
vector<level_t*> levels;
/* Constructors*/
QR_matrix();
QR_matrix(real_t * h_A, const int m, const int n, const int lda);
~QR_matrix();
/* Set the matrix in its internal form (transpose) and retrieve it back */
void factor();
void retrieveQ();
void calculate_dimensions(const int m, const int n);
void panelTranspose(const real_t * mat_in, const int m, const int n, const int lda);
void panelTransInv(real_t * mat_out, const int m, const int n, const int lda);
void retrieveR(real_t * mat_out, const int m, const int n, const int lda);
real_t * blockQ(const int l);
/* Update the pointer to the next panel */
void increment(bool levelChagneFlag);
void decrement(bool levelChagneFlag);
int set_levels();
};
/*************************************** CUDA kernels *****************************************************/
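// reduce: dot product of the shared Householder vector with this thread's column fragment,
// combined block-wide through shared memory.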
__device__ real_t reduce(real_t * u_sh, real_t ub[], real_t col[], real_t * av, int tid_u, int tid_l, int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
ub[i] = u_sh[tid_u + i*BLK_ROWS];
real_t val = (real_t) 0;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
val += ub[i] * col[i];
if(tid >= (NUM_THREADS/2)) av[tid] = val;
__syncthreads();
if(tid < (NUM_THREADS/2)) av[tid] = av[tid + (NUM_THREADS/2)] + val;
__syncthreads();
val = 0;
#pragma unroll
for(int i = 0 ; i < (BLK_ROWS/2) ; i++)
val += av[tid_l + BLK_WIDTH*i];
return val;
}
__device__ void update(real_t * u_sh, real_t ub[], real_t col[], real_t res, int tid_u)
{
// Rank-1 update
real_t fres = (real_t) 2 * res;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
col[i] -= (real_t) ub[i] * fres;
}
__device__ void load_a(real_t * a, real_t col[], int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE; i++)
col[i] = a[tid + i*NUM_THREADS];
}
__device__ void load_a_triangles(real_t * a, real_t col[], int tid, int offset_blocks, real_t * A_max)
{
// This is hardcoded for 128x16. Oops!
real_t * a_orig = a;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE / (BLK_WIDTH/BLK_ROWS); i++) {
if(a < A_max) {
for(int ii = 0 ; ii < (BLK_WIDTH/BLK_ROWS) ; ii++)
col[(BLK_WIDTH/BLK_ROWS)*i + ii ] = a[tid + ii*NUM_THREADS];
}
a += offset_blocks * BLK_SIZE;
}
a = a_orig;
}
__device__ void write_a(real_t * a, real_t col[], int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
a[tid + i*NUM_THREADS] = col[i];
}
__device__ void write_a_triangles(real_t * a, real_t col[], int tid, int offset_blocks, real_t * A_max)
{
// This is hardcoded for 128x16. Oops!
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE/ (BLK_WIDTH/BLK_ROWS); i++) {
if(a < A_max) {
for(int ii = 0 ; ii < (BLK_WIDTH/BLK_ROWS) ; ii++)
a[tid + ii*NUM_THREADS] = col[(BLK_WIDTH/BLK_ROWS)*i + ii ];
}
a += offset_blocks * BLK_SIZE;
}
}
__device__ void load_u(real_t * u_sh, real_t * b, int tid)
{
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
u_sh[tid + i*BLK_HEIGHT] = b[tid + i*BLK_HEIGHT];
__syncthreads();
}
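// Apply a block's BLK_WIDTH Householder reflectors in reverse order (used when accumulating Q).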
__global__ void hh_update_dense_reverse(real_t * a, real_t * b, int lda_panel, int ldq)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a(a, col, tid);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
u_sh = &u[(BLK_WIDTH-1) * BLK_HEIGHT];
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh -= BLK_HEIGHT;
}
// write out the block
write_a(a, col, tid);
}
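// compute_u: build the normalized Householder vector for column j in shared memory
// (partial norms, sign choice, rescale).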
__device__ void compute_u(real_t * u_sh, real_t col[], real_t norms[], int tid, int tid_u, int tid_l, int j, int row, int m)
{
__shared__ real_t mulby_sh;
__syncthreads();
if(j + row >= m) {
u_sh[tid] = (real_t) 0;
}
else {
if(tid_l == j) {
real_t local = 0.0;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++) {
u_sh[tid_u + i*BLK_ROWS] = col[i];
if(tid_u + i*BLK_ROWS > j) local += col[i] * col[i];
}
norms[tid_u] = local;
}
__syncthreads();
if(tid == j)
{
real_t nm2_nminus1 = (real_t)0.0;
#pragma unroll
for(int i = 0 ; i < BLK_ROWS ; i++)
nm2_nminus1 += norms[i];
real_t top_element = u_sh[j];
#ifdef DOUBLE
real_t nm = sqrt(nm2_nminus1 + top_element*top_element);
#endif
#ifdef FLOAT
real_t nm = sqrtf(nm2_nminus1 + top_element*top_element);
#endif
u_sh[j] = top_element = (top_element >= (real_t)0) ? top_element + nm : top_element - nm;
#ifdef DOUBLE
real_t divby = sqrt(nm2_nminus1 + top_element*top_element);
#endif
#ifdef FLOAT
real_t divby = sqrtf(nm2_nminus1 + top_element*top_element);
#endif
mulby_sh = (divby != (real_t) 0) ? ((real_t) 1.0) / divby : (real_t)0;
}
if(tid < j) u_sh[tid] = (real_t) 0;
__syncthreads();
u_sh[tid] *= mulby_sh;
}
__syncthreads();
}
// Factor one small matrix block; the Householder vectors are saved in b
__global__ void hh_factor_dense(real_t * a, real_t * b, int m, int lda_panel, int ldq)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT];
__shared__ real_t norms[BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh = &u[0];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
int row = blockIdx.x * BLK_HEIGHT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a(a, col, tid);
// For each column of a
for(int j = 0 ; j < BLK_WIDTH ; j++) {
// Form (transpose) the u vector
compute_u(u_sh, col, norms, tid, tid_u, tid_l, j, row, m);
// Matrix-vector multiply: res = v' * A(i:m, :);
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update: A(j:m, :) -= 2 * v * res;
update(u_sh, ub, col, res, tid_u);
// Write out u
b[tid] = u_sh[tid];
// Go to the next Householder vector
b += BLK_HEIGHT;
}
}
// hh_update_dense: apply one block's reflectors to the trailing panels, grid-striding over panels in y
__global__ void hh_update_dense(real_t * a, real_t * b, int lda_panel, int ldq, int max_y)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
for(int p = blockIdx.y ; p < max_y ; p += gridDim.y)
{
// load in the block
load_a(a, col, tid);
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh += BLK_HEIGHT;
}
// write out the block
write_a(a, col, tid);
u_sh = &u[0];
a += gridDim.y * lda_panel;
}
}
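// Factor the triangular blocks gathered at offset_blocks strides: one reduction step of the CAQR tree.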
__global__ void hh_factor_triangle(real_t * a, real_t * b, int m, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT];
__shared__ real_t norms[BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh = &u[0];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
int row = blockIdx.x * (BLK_HEIGHT / BLK_WIDTH) * offset_blocks * BLK_HEIGHT;
// Pretend we are in panel-transpose form
a += row * BLK_WIDTH + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// For each column of a
for(int j = 0 ; j < BLK_WIDTH ; j++) {
// Form (transpose) the u vector
compute_u(u_sh, col, norms, tid, tid_u, tid_l, j, row, m);
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Write out u
b[tid] = u_sh[tid];
// Go to the next Householder vector
b += BLK_HEIGHT;
}
}
__global__ void hh_update_triangle_reverse(real_t * a, real_t * b, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_ROWS * offset_blocks * BLK_SIZE + blockIdx.y * lda_panel;
// Update A_max in case we are working on a different column
A_max += blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
u_sh = &u[(BLK_WIDTH-1)*BLK_HEIGHT];
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh -= BLK_HEIGHT;
}
// write out the block
write_a_triangles(a, col, tid, offset_blocks, A_max);
}
__global__ void hh_update_triangle(real_t * a, real_t * b, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_ROWS * offset_blocks * BLK_SIZE + blockIdx.y * lda_panel;
// Update the max in case we are on a different panel
A_max += blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh += BLK_HEIGHT;
}
// write out the block
write_a_triangles(a, col, tid, offset_blocks, A_max);
}
/* Get the address of a block (i,j) for level l in the Q matrix */
real_t * QR_matrix::blockQ(const int l) {
assert(l <= levels.size());
int agg_blocks = levels[l]->aggregate_blocks;
// Get pointer to the next level
int offset_blocks = agg_blocks * BLK_SIZE;
return Q + offset_blocks;
}
/* Panel transpose of a block
<<< dim3(blks_tall_total, blks_wide_total), BLK_HEIGHT >>> */
__global__ void blockTranspose(real_t * out, const real_t * in,
int ld_panel_size, int m, int n, int ld_col)
{
// Shared memory
__shared__ real_t sh[(BLK_HEIGHT + 1) * BLK_WIDTH]; // the +1 pads each column so the transposed accesses avoid shared-memory bank conflicts
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Offset the input vector address
in += BLK_HEIGHT * blockIdx.x + BLK_WIDTH * ld_col * blockIdx.y;
out += BLK_SIZE * blockIdx.x + ld_panel_size * blockIdx.y;
// If we are close to the border then this will be < BLK_WIDTH
int n_it = n - BLK_WIDTH * blockIdx.y;
// Load whole block into shared memory into column major
if(tid + BLK_HEIGHT * blockIdx.x < m) {
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH; i++)
sh[tid + i*BLK_HEIGHT+i] = (i < n_it) ? in[tid + i * ld_col] : (real_t) 0;
} else {
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
sh[tid + i*BLK_HEIGHT+i] = (real_t) 0;
}
__syncthreads();
// Load block out of shared memory in transposed form
int off = tid_l * (BLK_HEIGHT+1) + tid_u;
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
{ out[tid + i * BLK_HEIGHT] = sh[off + i * BLK_ROWS]; }
}
/* Panel transpose of the entire matrix (inverse) */
__global__ void trans_inv(real_t * out, const real_t * in, int ld_panel_size, int m, int n, int ld_col)
{
__shared__ real_t sh[(BLK_HEIGHT+1) * BLK_WIDTH];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Offset the output matrix
in += BLK_SIZE * blockIdx.x + ld_panel_size * blockIdx.y;
out += BLK_HEIGHT * blockIdx.x + BLK_WIDTH * ld_col * blockIdx.y;
// In case we run off the end in the n direction
int n_it = n - BLK_WIDTH * blockIdx.y;
n_it = (n_it < BLK_WIDTH) ? n_it : BLK_WIDTH;
// Load block into shared memory in column major
int off = tid_l * (BLK_HEIGHT+1) + tid_u;
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
sh[off + i * BLK_ROWS] = in[tid + i * BLK_HEIGHT];
__syncthreads();
// Write back out
if(tid + BLK_HEIGHT * blockIdx.x >= m) return;
for(int i = 0 ; i < n_it; i++)
out[tid + i * ld_col] = sh[tid + i*BLK_HEIGHT+i];
}
/* Set matrix to identity */
__global__ void set_ident(real_t * A, int ld_panel_size, int m, int n)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
// Offset the input vector
A += (BLK_WIDTH * BLK_WIDTH + ld_panel_size) * bid;
if(tid + BLK_WIDTH * bid >= n) return;
// Set diagonal
int index = tid + tid * BLK_WIDTH;
A[index] = 1.0;
}
/*************************************** Host functions *******************************************/
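// orth_CAQR_size: number of real_t elements of Q workspace needed for an m-by-n CAQR
// (sums the Householder blocks over all levels of the reduction tree).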
int orth_CAQR_size(const int m, const int n){
int blks_tall_cur = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT);
//int blks_wide_cur = (n + BLK_WIDTH - 1) / BLK_WIDTH;
int num_blocks = blks_tall_cur;
int tb = num_blocks;
// Add the first block
level_t *lev = new level_t(num_blocks, 0, 0, 0);
vector<level_t*> levels;
levels.push_back(lev);
int block_offset = 1;
int levnum = 1;
while(num_blocks > 1) {
num_blocks = (num_blocks + (BLK_HEIGHT/BLK_WIDTH) - 1) / (BLK_HEIGHT / BLK_WIDTH);
lev = new level_t(num_blocks, tb, block_offset, levnum);
levels.push_back(lev);
block_offset *= (BLK_HEIGHT/BLK_WIDTH);
levnum++;
tb += num_blocks;
}
//int blks_tall_total = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT) + 1; // + 1 is necessary! find out why!
//int ld_panel_size = blks_tall_total * BLK_WIDTH * BLK_HEIGHT;
int ldq = tb * BLK_HEIGHT;
int blks_wide_total = (n + BLK_WIDTH - 1) / BLK_WIDTH;
int ldq_panel = ldq * BLK_WIDTH;
return (ldq_panel * blks_wide_total);
}
// The matrix is stored panel-transposed: each BLK_HEIGHT x BLK_WIDTH block is written in transposed form.
void QR_matrix::panelTranspose(const real_t * mat_in, const int m, const int n, const int lda) {
assert(lda >= m);
//CHECK_CUDA( hipMemset(mat_base, 0, internal_matrix_size * sizeof(real_t)));
//calculate_dimensions(m, n);
// grid is set to the block number
// 1 threadblock is in charge of transpose 1 matrix block,
hipLaunchKernelGGL(( blockTranspose) , dim3(dim3(blks_tall_total, blks_wide_total)), dim3(BLK_HEIGHT) , 0, 0,
mat_base, mat_in, ld_panel_size, m, n, lda);
CHECK_CUDA( hipDeviceSynchronize() );
CHECK_CUDA( hipGetLastError() );
}
void QR_matrix::calculate_dimensions(const int m, const int n) {
this->m = m;
this->n = n;
m_current = m;
n_current = n;
blks_wide_total = (n + BLK_WIDTH - 1) / BLK_WIDTH;
blks_tall_total = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT) + 1; // + 1 is necessary! find out why!
total_blocks = set_levels();
ld_panel_size = blks_tall_total * BLK_WIDTH * BLK_HEIGHT; // the size of a panel, (which is transposed)
ldq = total_blocks * BLK_HEIGHT;
ldq_panel = ldq * BLK_WIDTH;
internal_matrix_size = ld_panel_size * blks_wide_total;
}
QR_matrix::QR_matrix() {}
QR_matrix::QR_matrix(real_t * d_A, const int m, const int n, const int lda) {
// Build the data structures
calculate_dimensions(m, n);
// Allocate the data matrix
CHECK_CUDA( hipMalloc((real_t**) &mat_base, internal_matrix_size * sizeof(real_t)));
Q_base = d_A;
// CHECK_CUDA( hipMalloc( (real_t**) &Q_base, ldq_panel * blks_wide_total * sizeof(real_t) ) );
// CHECK_CUDA( hipMemset( Q_base, 0, ldq_panel * blks_wide_total * sizeof(real_t) ) );
// Transpose
panelTranspose(d_A, m, n, lda);
// Allocate the Q matrix
// printf("A size = %d.\n", lda * n);
// printf("A' size = %d.\n", internal_matrix_size);
// printf("Q szie = %d.\n", ldq_panel * blks_wide_total);
// Set "current" pointers
mat_cur = mat_base;
Q = Q_base;
this->lda = lda;
}
#define SIMD_WIDTH 16
void QR_matrix::factor()
{
for(int i = 0; i < blks_wide_total; i++) {
// Factor two blocks on the left
hipLaunchKernelGGL(( hh_factor_dense) , dim3(blks_tall_cur), dim3(NUM_THREADS) , 0, 0, mat_cur, blockQ(0), m_current, ld_panel_size, ldq);
// Update two blocks on the left and right
int y_wid = 1;
if(blks_tall_cur * blks_wide_cur < 2000) { y_wid = blks_wide_cur; }
else { y_wid = (blks_wide_cur - 1) / SIMD_WIDTH + 1;}
// **** Added from version 1.2 to get performance on large square **
hipLaunchKernelGGL(( hh_update_dense) , dim3(dim3(blks_tall_cur, y_wid)), dim3(NUM_THREADS) , 0, 0, mat_cur, blockQ(0), ld_panel_size, ldq, blks_wide_cur);
for (int lev = 1; lev < levels.size(); lev++) {
level_t * cur_lev = levels[lev];
// TODO
hipLaunchKernelGGL(( hh_factor_triangle) , dim3(dim3(cur_lev->num_blocks, 1)), dim3(NUM_THREADS), 0, 0, mat_cur, blockQ(lev), m_current, ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
// TODO
hipLaunchKernelGGL(( hh_update_triangle) , dim3(dim3(cur_lev->num_blocks, blks_wide_cur)), dim3(NUM_THREADS), 0, 0, mat_cur, blockQ(lev), ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
}
// Next panel
increment(false);
}
CHECK_CUDA( hipDeviceSynchronize() );
CHECK_CUDA( hipGetLastError() );
}
void QR_matrix::retrieveQ()
{
/* set block Q to identity matrix
Q = |1 0 .. 0 |
|0 1 .. 0 |
|0 ..... |
*/
CHECK_CUDA( hipMemset(mat_base, 0, internal_matrix_size * sizeof(real_t)));
calculate_dimensions(m, n);
hipLaunchKernelGGL(( set_ident) , dim3(dim3(blks_wide_total)), dim3(BLK_WIDTH) , 0, 0, mat_base, ld_panel_size, m, n);
// Set "current" pointers
mat_cur= mat_base;
Q = Q_base;
int k_blks = (n + BLK_WIDTH - 1) / BLK_WIDTH;
// A bit of a hack, but it's probably fine.
for(int panel = 0 ; panel < blks_wide_total - 1 ; panel++) increment(true);
for(int panel = blks_wide_total - 1 ; panel >= 0 ; panel--) {
// Probably want to iterate through "levels" here. That's why you used STL right?
for (int lev = levels.size() - 1; lev > 0; lev--) {
level_t * cur_lev = levels[lev];
hipLaunchKernelGGL(( hh_update_triangle_reverse) , dim3(dim3(cur_lev->num_blocks, k_blks)), dim3(NUM_THREADS) , 0, 0,
mat_cur, blockQ(lev), ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
}
// Update two blocks on the left and right
hipLaunchKernelGGL(( hh_update_dense_reverse) , dim3(dim3(blks_tall_cur, k_blks)), dim3(NUM_THREADS) , 0, 0, mat_cur, blockQ(0), ld_panel_size, ldq);
// Next panel
decrement(true);
}
}
int QR_matrix::set_levels() {
levels.clear();
blks_tall_cur = ((m_current + BLK_HEIGHT - 1) / BLK_HEIGHT);
blks_wide_cur = (n_current + BLK_WIDTH - 1) / BLK_WIDTH;
int num_blocks = blks_tall_cur;
int tb = num_blocks;
// Add the first block
level_t *lev = new level_t(num_blocks, 0, 0, 0);
levels.push_back(lev);
int block_offset = 1;
int levnum = 1;
while(num_blocks > 1) {
num_blocks = (num_blocks + (BLK_HEIGHT/BLK_WIDTH) - 1) / (BLK_HEIGHT / BLK_WIDTH);
lev = new level_t(num_blocks, tb, block_offset, levnum);
levels.push_back(lev);
block_offset *= (BLK_HEIGHT/BLK_WIDTH);
levnum++;
tb += num_blocks;
}
return tb;
}
void QR_matrix::panelTransInv(real_t * mat_out, const int m, const int n, const int lda) {
dim3 gridDim(blks_tall_total, blks_wide_total);
dim3 blockDim(BLK_HEIGHT);
hipLaunchKernelGGL(( trans_inv) , dim3(gridDim), dim3(blockDim) , 0, 0, mat_out, mat_base, ld_panel_size, m, n, lda);
}
void QR_matrix::retrieveR(real_t * mat_out, const int m, const int n, const int lda) {
dim3 gridDim((blks_wide_total + BLK_ROWS - 1) / BLK_ROWS , blks_wide_total);
dim3 blockDim(BLK_HEIGHT);
hipLaunchKernelGGL(( trans_inv) , dim3(gridDim), dim3(blockDim) , 0, 0, mat_out, mat_base, ld_panel_size, m, n, lda);
}
// Add to "current" pointers by one panel
void QR_matrix::increment(bool levelChangeFlag) {
if(!levelChangeFlag) mat_cur = mat_cur + ld_panel_size;
mat_cur = mat_cur+ BLK_WIDTH * BLK_WIDTH;
Q = Q + ldq_panel;
m_current -= BLK_WIDTH;
n_current -= BLK_WIDTH;
set_levels();
}
// Add to "current" pointers by one panel
void QR_matrix::decrement(bool levelChagneFlag) {
if(!levelChagneFlag) mat_cur= mat_cur- ld_panel_size;
mat_cur = mat_cur - BLK_WIDTH * BLK_WIDTH;
Q = Q - ldq_panel;
m_current += BLK_WIDTH;
n_current += BLK_WIDTH;
set_levels();
}
/* destructor */
QR_matrix::~QR_matrix() {
CHECK_CUDA( hipFree(mat_base));
// CHECK_CUDA( hipFree(Q_base));
}
level_t::level_t() {}
level_t::level_t(int nb, int ab, int bo, int l) {
num_blocks = nb;
aggregate_blocks = ab;
block_offset = bo;
lev = l;
}
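// orth_CAQR: orthonormalize the columns of d_A in place: panel-transpose, factor, accumulate Q, transpose back.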
void orth_CAQR(real_t *d_A, const uint64_t m, const uint64_t n){
const int lda = roundup_to_32X( m );
QR_matrix *QRobj = new QR_matrix(d_A, m, n, lda);
// QR factorization
QRobj->factor();
//QRobj->retrieveR(d_A, m, n, lda);
// Retrieve Q
QRobj->retrieveQ();
QRobj->panelTransInv(d_A, m, n, lda);
CHECK_CUDA( hipDeviceSynchronize() );
CHECK_CUDA( hipGetLastError() );
delete QRobj;
}
| 685e7b6b731ca07c62c0c0f91f895e7d7b0cec19.cu | #include "assert.h"
#include "gpuErrorCheck.h"
#include "rsvd.h"
#include <vector>
#include <iostream>
#include <cuda.h>
using namespace std;
/* Tuned settings for float and double */
/*
#define FLOAT
#define real_t float
#define BLK_WIDTH 16
#define BLK_HEIGHT 128
#define NUM_THREADS 128
#define BLK_SIZE (BLK_WIDTH * BLK_HEIGHT)
#define BLK_ROWS (NUM_THREADS / BLK_WIDTH)
#define THREAD_STORAGE BLK_WIDTH
#define TID_L_MASK 0xF
#define TID_U_SHIFT 4
#define ZERO_INIT {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
*/
#define DOUBLE
#define real_t double
#define BLK_WIDTH 8
#define BLK_HEIGHT 64
#define NUM_THREADS 64
#define BLK_SIZE (BLK_WIDTH * BLK_HEIGHT)
#define BLK_ROWS (NUM_THREADS / BLK_WIDTH)
#define THREAD_STORAGE BLK_WIDTH
#define TID_L_MASK 0x7
#define TID_U_SHIFT 3
#define ZERO_INIT {0,0,0,0,0,0,0,0}
/******************************************* class ****************************************************/
class level_t {
public:
int num_blocks;
int aggregate_blocks;
int block_offset;
int lev;
level_t();
level_t(int nb, int ab, int bo, int l);
};
/* This defines the main matrix data structure and functions to
access and modify it */
class QR_matrix {
public:
/* Input matrix properties */
int m;
int n;
int lda;
int ld_panel_size;
int ldq;
int ldq_panel;
int blks_tall_total;
int blks_wide_total;
int internal_matrix_size;
int total_blocks;
real_t * mat_base;
real_t * Q_base;
/* Current information */
real_t * mat_cur;
real_t * Q;
int m_current;
int n_current;
int blks_tall_cur;
int blks_wide_cur;
vector<level_t*> levels;
/* Constructors*/
QR_matrix();
QR_matrix(real_t * h_A, const int m, const int n, const int lda);
~QR_matrix();
/* Set the matrix in its internal form (transpose) and retrieve it back */
void factor();
void retrieveQ();
void calculate_dimensions(const int m, const int n);
void panelTranspose(const real_t * mat_in, const int m, const int n, const int lda);
void panelTransInv(real_t * mat_out, const int m, const int n, const int lda);
void retrieveR(real_t * mat_out, const int m, const int n, const int lda);
real_t * blockQ(const int l);
/* Update the pointer to the next panel */
void increment(bool levelChangeFlag);
void decrement(bool levelChangeFlag);
int set_levels();
};
/*************************************** CUDA kernels *****************************************************/
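// reduce: dot product of the shared Householder vector with this thread's column fragment,
// combined block-wide through shared memory.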
__device__ real_t reduce(real_t * u_sh, real_t ub[], real_t col[], real_t * av, int tid_u, int tid_l, int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
ub[i] = u_sh[tid_u + i*BLK_ROWS];
real_t val = (real_t) 0;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
val += ub[i] * col[i];
if(tid >= (NUM_THREADS/2)) av[tid] = val;
__syncthreads();
if(tid < (NUM_THREADS/2)) av[tid] = av[tid + (NUM_THREADS/2)] + val;
__syncthreads();
val = 0;
#pragma unroll
for(int i = 0 ; i < (BLK_ROWS/2) ; i++)
val += av[tid_l + BLK_WIDTH*i];
return val;
}
__device__ void update(real_t * u_sh, real_t ub[], real_t col[], real_t res, int tid_u)
{
// Rank-1 update
real_t fres = (real_t) 2 * res;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
col[i] -= (real_t) ub[i] * fres;
}
__device__ void load_a(real_t * a, real_t col[], int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE; i++)
col[i] = a[tid + i*NUM_THREADS];
}
__device__ void load_a_triangles(real_t * a, real_t col[], int tid, int offset_blocks, real_t * A_max)
{
// This is hardcoded for 128x16. Oops!
real_t * a_orig = a;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE / (BLK_WIDTH/BLK_ROWS); i++) {
if(a < A_max) {
for(int ii = 0 ; ii < (BLK_WIDTH/BLK_ROWS) ; ii++)
col[(BLK_WIDTH/BLK_ROWS)*i + ii ] = a[tid + ii*NUM_THREADS];
}
a += offset_blocks * BLK_SIZE;
}
a = a_orig;
}
__device__ void write_a(real_t * a, real_t col[], int tid)
{
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++)
a[tid + i*NUM_THREADS] = col[i];
}
__device__ void write_a_triangles(real_t * a, real_t col[], int tid, int offset_blocks, real_t * A_max)
{
// This is hardcoded for 128x16. Oops!
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE/ (BLK_WIDTH/BLK_ROWS); i++) {
if(a < A_max) {
for(int ii = 0 ; ii < (BLK_WIDTH/BLK_ROWS) ; ii++)
a[tid + ii*NUM_THREADS] = col[(BLK_WIDTH/BLK_ROWS)*i + ii ];
}
a += offset_blocks * BLK_SIZE;
}
}
__device__ void load_u(real_t * u_sh, real_t * b, int tid)
{
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
u_sh[tid + i*BLK_HEIGHT] = b[tid + i*BLK_HEIGHT];
__syncthreads();
}
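// Apply a block's BLK_WIDTH Householder reflectors in reverse order (used when accumulating Q).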
__global__ void hh_update_dense_reverse(real_t * a, real_t * b, int lda_panel, int ldq)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a(a, col, tid);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
u_sh = &u[(BLK_WIDTH-1) * BLK_HEIGHT];
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh -= BLK_HEIGHT;
}
// write out the block
write_a(a, col, tid);
}
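// compute_u: build the normalized Householder vector for column j in shared memory
// (partial norms, sign choice, rescale).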
__device__ void compute_u(real_t * u_sh, real_t col[], real_t norms[], int tid, int tid_u, int tid_l, int j, int row, int m)
{
__shared__ real_t mulby_sh;
__syncthreads();
if(j + row >= m) {
u_sh[tid] = (real_t) 0;
}
else {
if(tid_l == j) {
real_t local = 0.0;
#pragma unroll
for(int i = 0 ; i < THREAD_STORAGE ; i++) {
u_sh[tid_u + i*BLK_ROWS] = col[i];
if(tid_u + i*BLK_ROWS > j) local += col[i] * col[i];
}
norms[tid_u] = local;
}
__syncthreads();
if(tid == j)
{
real_t nm2_nminus1 = (real_t)0.0;
#pragma unroll
for(int i = 0 ; i < BLK_ROWS ; i++)
nm2_nminus1 += norms[i];
real_t top_element = u_sh[j];
#ifdef DOUBLE
real_t nm = sqrt(nm2_nminus1 + top_element*top_element);
#endif
#ifdef FLOAT
real_t nm = sqrtf(nm2_nminus1 + top_element*top_element);
#endif
u_sh[j] = top_element = (top_element >= (real_t)0) ? top_element + nm : top_element - nm;
#ifdef DOUBLE
real_t divby = sqrt(nm2_nminus1 + top_element*top_element);
#endif
#ifdef FLOAT
real_t divby = sqrtf(nm2_nminus1 + top_element*top_element);
#endif
mulby_sh = (divby != (real_t) 0) ? ((real_t) 1.0) / divby : (real_t)0;
}
if(tid < j) u_sh[tid] = (real_t) 0;
__syncthreads();
u_sh[tid] *= mulby_sh;
}
__syncthreads();
}
// Factor one small matrix block; the Householder vectors are saved in b
__global__ void hh_factor_dense(real_t * a, real_t * b, int m, int lda_panel, int ldq)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT];
__shared__ real_t norms[BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh = &u[0];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
int row = blockIdx.x * BLK_HEIGHT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a(a, col, tid);
// For each column of a
for(int j = 0 ; j < BLK_WIDTH ; j++) {
// Form (transpose) the u vector
compute_u(u_sh, col, norms, tid, tid_u, tid_l, j, row, m);
// Matrix-vector multiply: res = v' * A(i:m, :);
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update: A(j:m, :) -= 2 * v * res;
update(u_sh, ub, col, res, tid_u);
// Write out u
b[tid] = u_sh[tid];
// Go to the next Householder vector
b += BLK_HEIGHT;
}
}
// hh_update_dense: apply one block's reflectors to the trailing panels, grid-striding over panels in y
__global__ void hh_update_dense(real_t * a, real_t * b, int lda_panel, int ldq, int max_y)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE];
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_SIZE + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
for(int p = blockIdx.y ; p < max_y ; p += gridDim.y)
{
// load in the block
load_a(a, col, tid);
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh += BLK_HEIGHT;
}
// write out the block
write_a(a, col, tid);
u_sh = &u[0];
a += gridDim.y * lda_panel;
}
}
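// Factor the triangular blocks gathered at offset_blocks strides: one reduction step of the CAQR tree.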
__global__ void hh_factor_triangle(real_t * a, real_t * b, int m, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT];
__shared__ real_t norms[BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh = &u[0];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
int row = blockIdx.x * (BLK_HEIGHT / BLK_WIDTH) * offset_blocks * BLK_HEIGHT;
// Pretend we are in panel-transpose form
a += row * BLK_WIDTH + blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// For each column of a
for(int j = 0 ; j < BLK_WIDTH ; j++) {
// Form (transpose) the u vector
compute_u(u_sh, col, norms, tid, tid_u, tid_l, j, row, m);
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Write out u
b[tid] = u_sh[tid];
// Go to the next Householder vector
b += BLK_HEIGHT;
}
}
__global__ void hh_update_triangle_reverse(real_t * a, real_t * b, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_ROWS * offset_blocks * BLK_SIZE + blockIdx.y * lda_panel;
// Update A_max in case we are working on a different column
A_max += blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
u_sh = &u[(BLK_WIDTH-1)*BLK_HEIGHT];
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh -= BLK_HEIGHT;
}
// write out the block
write_a_triangles(a, col, tid, offset_blocks, A_max);
}
__global__ void hh_update_triangle(real_t * a, real_t * b, int lda_panel, int ldq, int offset_blocks, real_t * A_max)
{
__shared__ real_t av[NUM_THREADS];
__shared__ real_t u[BLK_HEIGHT * BLK_WIDTH];
real_t col[THREAD_STORAGE] = ZERO_INIT;
real_t ub[THREAD_STORAGE];
real_t * u_sh;
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Pretend we are in panel-transpose form
a += blockIdx.x * BLK_ROWS * offset_blocks * BLK_SIZE + blockIdx.y * lda_panel;
// Update the max in case we are on a different panel
A_max += blockIdx.y * lda_panel;
// Pretend we are in column major form
b += blockIdx.x * BLK_SIZE;
// load in the block
load_a_triangles(a, col, tid, offset_blocks, A_max);
// Load u
u_sh = &u[0];
load_u(u_sh, b, tid);
// For each Householder vector
for(int j = 0 ; j < BLK_WIDTH; j++) {
// Matrix-vector multiply
real_t res = reduce(u_sh, ub, col, av, tid_u, tid_l, tid);
// Rank-1 update
update(u_sh, ub, col, res, tid_u);
// Go to the next Householder vector
u_sh += BLK_HEIGHT;
}
// write out the block
write_a_triangles(a, col, tid, offset_blocks, A_max);
}
/* Get the address of a block (i,j) for level l in the Q matrix */
real_t * QR_matrix::blockQ(const int l) {
assert(l <= levels.size());
int agg_blocks = levels[l]->aggregate_blocks;
// Get pointer to the next level
int offset_blocks = agg_blocks * BLK_SIZE;
return Q + offset_blocks;
}
/* Panel transpose of a block
<<< dim3(blks_tall_total, blks_wide_total), BLK_HEIGHT >>> */
__global__ void blockTranspose(real_t * out, const real_t * in,
int ld_panel_size, int m, int n, int ld_col)
{
// Shared memory
__shared__ real_t sh[(BLK_HEIGHT + 1) * BLK_WIDTH]; // the +1 pads each column so the transposed accesses avoid shared-memory bank conflicts
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Offset the input vector address
in += BLK_HEIGHT * blockIdx.x + BLK_WIDTH * ld_col * blockIdx.y;
out += BLK_SIZE * blockIdx.x + ld_panel_size * blockIdx.y;
// If we are close to the border then this will be < BLK_WIDTH
int n_it = n - BLK_WIDTH * blockIdx.y;
// Load whole block into shared memory into column major
if(tid + BLK_HEIGHT * blockIdx.x < m) {
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH; i++)
sh[tid + i*BLK_HEIGHT+i] = (i < n_it) ? in[tid + i * ld_col] : (real_t) 0;
} else {
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
sh[tid + i*BLK_HEIGHT+i] = (real_t) 0;
}
__syncthreads();
// Load block out of shared memory in transposed form
int off = tid_l * (BLK_HEIGHT+1) + tid_u;
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
{ out[tid + i * BLK_HEIGHT] = sh[off + i * BLK_ROWS]; }
}
/* Panel transpose of the entire matrix (inverse) */
__global__ void trans_inv(real_t * out, const real_t * in, int ld_panel_size, int m, int n, int ld_col)
{
__shared__ real_t sh[(BLK_HEIGHT+1) * BLK_WIDTH];
int tid = threadIdx.x;
int tid_l = tid & TID_L_MASK;
int tid_u = tid >> TID_U_SHIFT;
// Offset the output matrix
in += BLK_SIZE * blockIdx.x + ld_panel_size * blockIdx.y;
out += BLK_HEIGHT * blockIdx.x + BLK_WIDTH * ld_col * blockIdx.y;
// In case we run off the end in the n direction
int n_it = n - BLK_WIDTH * blockIdx.y;
n_it = (n_it < BLK_WIDTH) ? n_it : BLK_WIDTH;
// Load block into shared memory in column major
int off = tid_l * (BLK_HEIGHT+1) + tid_u;
#pragma unroll
for(int i = 0 ; i < BLK_WIDTH ; i++)
sh[off + i * BLK_ROWS] = in[tid + i * BLK_HEIGHT];
__syncthreads();
// Write back out
if(tid + BLK_HEIGHT * blockIdx.x >= m) return;
for(int i = 0 ; i < n_it; i++)
out[tid + i * ld_col] = sh[tid + i*BLK_HEIGHT+i];
}
/* Set matrix to identity */
__global__ void set_ident(real_t * A, int ld_panel_size, int m, int n)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
// Offset the input vector
A += (BLK_WIDTH * BLK_WIDTH + ld_panel_size) * bid;
if(tid + BLK_WIDTH * bid >= n) return;
// Set diagonal
int index = tid + tid * BLK_WIDTH;
A[index] = 1.0;
}
/*************************************** Host functions *******************************************/
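// orth_CAQR_size: number of real_t elements of Q workspace needed for an m-by-n CAQR
// (sums the Householder blocks over all levels of the reduction tree).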
int orth_CAQR_size(const int m, const int n){
int blks_tall_cur = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT);
//int blks_wide_cur = (n + BLK_WIDTH - 1) / BLK_WIDTH;
int num_blocks = blks_tall_cur;
int tb = num_blocks;
// Add the first block
level_t *lev = new level_t(num_blocks, 0, 0, 0);
vector<level_t*> levels;
levels.push_back(lev);
int block_offset = 1;
int levnum = 1;
while(num_blocks > 1) {
num_blocks = (num_blocks + (BLK_HEIGHT/BLK_WIDTH) - 1) / (BLK_HEIGHT / BLK_WIDTH);
lev = new level_t(num_blocks, tb, block_offset, levnum);
levels.push_back(lev);
block_offset *= (BLK_HEIGHT/BLK_WIDTH);
levnum++;
tb += num_blocks;
}
//int blks_tall_total = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT) + 1; // + 1 is necessary! find out why!
//int ld_panel_size = blks_tall_total * BLK_WIDTH * BLK_HEIGHT;
int ldq = tb * BLK_HEIGHT;
int blks_wide_total = (n + BLK_WIDTH - 1) / BLK_WIDTH;
int ldq_panel = ldq * BLK_WIDTH;
return (ldq_panel * blks_wide_total);
}
// The matrix is stored panel-transposed: each BLK_HEIGHT x BLK_WIDTH block is written in transposed form.
void QR_matrix::panelTranspose(const real_t * mat_in, const int m, const int n, const int lda) {
assert(lda >= m);
//CHECK_CUDA( cudaMemset(mat_base, 0, internal_matrix_size * sizeof(real_t)));
//calculate_dimensions(m, n);
// grid is set to the block number
// 1 threadblock is in charge of transpose 1 matrix block,
blockTranspose <<< dim3(blks_tall_total, blks_wide_total), BLK_HEIGHT >>>
(mat_base, mat_in, ld_panel_size, m, n, lda);
CHECK_CUDA( cudaThreadSynchronize() );
CHECK_CUDA( cudaGetLastError() );
}
void QR_matrix::calculate_dimensions(const int m, const int n) {
this->m = m;
this->n = n;
m_current = m;
n_current = n;
blks_wide_total = (n + BLK_WIDTH - 1) / BLK_WIDTH;
blks_tall_total = ((m + BLK_HEIGHT - 1) / BLK_HEIGHT) + 1; // + 1 is necessary! find out why!
total_blocks = set_levels();
ld_panel_size = blks_tall_total * BLK_WIDTH * BLK_HEIGHT; // the size of a panel, (which is transposed)
ldq = total_blocks * BLK_HEIGHT;
ldq_panel = ldq * BLK_WIDTH;
internal_matrix_size = ld_panel_size * blks_wide_total;
}
QR_matrix::QR_matrix() {}
QR_matrix::QR_matrix(real_t * d_A, const int m, const int n, const int lda) {
// Build the data structures
calculate_dimensions(m, n);
// Allocate the data matrix
CHECK_CUDA( cudaMalloc((real_t**) &mat_base, internal_matrix_size * sizeof(real_t)));
Q_base = d_A;
// CHECK_CUDA( cudaMalloc( (real_t**) &Q_base, ldq_panel * blks_wide_total * sizeof(real_t) ) );
// CHECK_CUDA( cudaMemset( Q_base, 0, ldq_panel * blks_wide_total * sizeof(real_t) ) );
// Transpose
panelTranspose(d_A, m, n, lda);
// Allocate the Q matrix
// printf("A size = %d.\n", lda * n);
// printf("A' size = %d.\n", internal_matrix_size);
// printf("Q szie = %d.\n", ldq_panel * blks_wide_total);
// Set "current" pointers
mat_cur = mat_base;
Q = Q_base;
this->lda = lda;
}
#define SIMD_WIDTH 16
void QR_matrix::factor()
{
for(int i = 0; i < blks_wide_total; i++) {
// Factor two blocks on the left
hh_factor_dense <<< blks_tall_cur, NUM_THREADS >>> (mat_cur, blockQ(0), m_current, ld_panel_size, ldq);
// Update two blocks on the left and right
int y_wid = 1;
if(blks_tall_cur * blks_wide_cur < 2000) { y_wid = blks_wide_cur; }
else { y_wid = (blks_wide_cur - 1) / SIMD_WIDTH + 1;}
// **** Added in version 1.2 to improve performance on large square matrices ****
hh_update_dense <<< dim3(blks_tall_cur, y_wid), NUM_THREADS >>> (mat_cur, blockQ(0), ld_panel_size, ldq, blks_wide_cur);
for (int lev = 1; lev < levels.size(); lev++) {
level_t * cur_lev = levels[lev];
// TODO
hh_factor_triangle <<< dim3(cur_lev->num_blocks, 1), NUM_THREADS>>> (mat_cur, blockQ(lev), m_current, ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
// TODO
hh_update_triangle <<< dim3(cur_lev->num_blocks, blks_wide_cur), NUM_THREADS>>> (mat_cur, blockQ(lev), ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
}
// Next panel
increment(false);
}
CHECK_CUDA( cudaThreadSynchronize() );
CHECK_CUDA( cudaGetLastError() );
}
void QR_matrix::retrieveQ()
{
/* set block Q to identity matrix
Q = |1 0 .. 0 |
|0 1 .. 0 |
|0 ..... |
*/
CHECK_CUDA( cudaMemset(mat_base, 0, internal_matrix_size * sizeof(real_t)));
calculate_dimensions(m, n);
set_ident <<< dim3(blks_wide_total), BLK_WIDTH >>> (mat_base, ld_panel_size, m, n);
// Set "current" pointers
mat_cur= mat_base;
Q = Q_base;
int k_blks = (n + BLK_WIDTH - 1) / BLK_WIDTH;
// A bit of a hack, but it's probably fine.
for(int panel = 0 ; panel < blks_wide_total - 1 ; panel++) increment(true);
for(int panel = blks_wide_total - 1 ; panel >= 0 ; panel--) {
// Probably want to iterate through "levels" here. That's why you used STL right?
for (int lev = levels.size() - 1; lev > 0; lev--) {
level_t * cur_lev = levels[lev];
hh_update_triangle_reverse <<< dim3(cur_lev->num_blocks, k_blks), NUM_THREADS >>>
(mat_cur, blockQ(lev), ld_panel_size, ldq, cur_lev->block_offset, mat_cur + m_current * BLK_WIDTH);
}
// Update two blocks on the left and right
hh_update_dense_reverse <<< dim3(blks_tall_cur, k_blks), NUM_THREADS >>> (mat_cur, blockQ(0), ld_panel_size, ldq);
// Next panel
decrement(true);
}
}
int QR_matrix::set_levels() {
// Delete level descriptors from any previous call before rebuilding
for (size_t i = 0; i < levels.size(); i++) delete levels[i];
levels.clear();
blks_tall_cur = ((m_current + BLK_HEIGHT - 1) / BLK_HEIGHT);
blks_wide_cur = (n_current + BLK_WIDTH - 1) / BLK_WIDTH;
int num_blocks = blks_tall_cur;
int tb = num_blocks;
// Add the first block
level_t *lev = new level_t(num_blocks, 0, 0, 0);
levels.push_back(lev);
int block_offset = 1;
int levnum = 1;
while(num_blocks > 1) {
num_blocks = (num_blocks + (BLK_HEIGHT/BLK_WIDTH) - 1) / (BLK_HEIGHT / BLK_WIDTH);
lev = new level_t(num_blocks, tb, block_offset, levnum);
levels.push_back(lev);
block_offset *= (BLK_HEIGHT/BLK_WIDTH);
levnum++;
tb += num_blocks;
}
return tb;
}
void QR_matrix::panelTransInv(real_t * mat_out, const int m, const int n, const int lda) {
dim3 gridDim(blks_tall_total, blks_wide_total);
dim3 blockDim(BLK_HEIGHT);
trans_inv <<< gridDim, blockDim >>> (mat_out, mat_base, ld_panel_size, m, n, lda);
}
void QR_matrix::retrieveR(real_t * mat_out, const int m, const int n, const int lda) {
dim3 gridDim((blks_wide_total + BLK_ROWS - 1) / BLK_ROWS , blks_wide_total);
dim3 blockDim(BLK_HEIGHT);
trans_inv <<< gridDim, blockDim >>> (mat_out, mat_base, ld_panel_size, m, n, lda);
}
// Add to "current" pointers by one panel
void QR_matrix::increment(bool levelChangeFlag) {
if(!levelChangeFlag) mat_cur = mat_cur + ld_panel_size;
mat_cur = mat_cur + BLK_WIDTH * BLK_WIDTH;
Q = Q + ldq_panel;
m_current -= BLK_WIDTH;
n_current -= BLK_WIDTH;
set_levels();
}
// Add to "current" pointers by one panel
void QR_matrix::decrement(bool levelChagneFlag) {
if(!levelChagneFlag) mat_cur= mat_cur- ld_panel_size;
mat_cur = mat_cur - BLK_WIDTH * BLK_WIDTH;
Q = Q - ldq_panel;
m_current += BLK_WIDTH;
n_current += BLK_WIDTH;
set_levels();
}
/* destructor */
QR_matrix::~QR_matrix() {
CHECK_CUDA( cudaFree(mat_base));
// CHECK_CUDA( cudaFree(Q_base));
}
level_t::level_t() {}
level_t::level_t(int nb, int ab, int bo, int l) {
num_blocks = nb;
aggregate_blocks = ab;
block_offset = bo;
lev = l;
}
void orth_CAQR(real_t *d_A, const uint64_t m, const uint64_t n){
const int lda = roundup_to_32X( m );
QR_matrix *QRobj = new QR_matrix(d_A, m, n, lda);
// QR factorization
QRobj->factor();
//QRobj->retrieveR(d_A, m, n, lda);
// Retrieve Q
QRobj->retrieveQ();
QRobj->panelTransInv(d_A, m, n, lda);
CHECK_CUDA( cudaThreadSynchronize() );
CHECK_CUDA( cudaGetLastError() );
delete QRobj;
}
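/* Usage sketch (hypothetical caller; the allocation layout is an assumption,
 * but it matches the lda = roundup_to_32X(m) used inside orth_CAQR):
 *   real_t *d_A;
 *   size_t lda = roundup_to_32X(m);
 *   CHECK_CUDA( cudaMalloc((void**)&d_A, lda * n * sizeof(real_t)) );
 *   // ... copy the column-major m x n input matrix into d_A ...
 *   orth_CAQR(d_A, m, n);  // on return d_A holds the orthonormal factor Q
 */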
|
ed485992101f7d1244ab8f8fd9bff09ac412171c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
#define FILTER2D_MAX_KERNEL_SIZE 16
__constant__ float c_filter2DKernel[FILTER2D_MAX_KERNEL_SIZE * FILTER2D_MAX_KERNEL_SIZE];
template <class SrcT, typename D>
__global__ void filter2D(const SrcT src, PtrStepSz<D> dst, const int kWidth, const int kHeight, const int anchorX, const int anchorY)
{
typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
sum_t res = VecTraits<sum_t>::all(0);
int kInd = 0;
for (int i = 0; i < kHeight; ++i)
{
for (int j = 0; j < kWidth; ++j)
res = res + src(y - anchorY + i, x - anchorX + j) * c_filter2DKernel[kInd++];
}
dst(y, x) = saturate_cast<D>(res);
}
template <typename T, typename D, template <typename> class Brd> struct Filter2DCaller;
#define IMPLEMENT_FILTER2D_TEX_READER(type) \
texture< type , hipTextureType2D, hipReadModeElementType> tex_filter2D_ ## type (0, hipFilterModePoint, hipAddressModeClamp); \
struct tex_filter2D_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
const int xoff; \
const int yoff; \
tex_filter2D_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_filter2D_ ## type , x + xoff, y + yoff); \
} \
}; \
template <typename D, template <typename> class Brd> struct Filter2DCaller< type , D, Brd> \
{ \
static void call(const PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz<D> dst, \
int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, hipStream_t stream) \
{ \
typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
dim3 block(16, 16); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_filter2D_ ## type , srcWhole); \
tex_filter2D_ ## type ##_reader texSrc(xoff, yoff); \
Brd<work_type> brd(dst.rows, dst.cols, VecTraits<work_type>::make(borderValue)); \
BorderReader< tex_filter2D_ ## type ##_reader, Brd<work_type> > brdSrc(texSrc, brd); \
hipLaunchKernelGGL(( filter2D), dim3(grid), dim3(block), 0, stream, brdSrc, dst, kWidth, kHeight, anchorX, anchorY); \
cudaSafeCall( hipGetLastError() ); \
if (stream == 0) \
cudaSafeCall( hipDeviceSynchronize() ); \
} \
};
IMPLEMENT_FILTER2D_TEX_READER(uchar);
IMPLEMENT_FILTER2D_TEX_READER(uchar4);
IMPLEMENT_FILTER2D_TEX_READER(ushort);
IMPLEMENT_FILTER2D_TEX_READER(ushort4);
IMPLEMENT_FILTER2D_TEX_READER(float);
IMPLEMENT_FILTER2D_TEX_READER(float4);
#undef IMPLEMENT_FILTER2D_TEX_READER
template <typename T, typename D>
void filter2D_gpu(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst,
int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel,
int borderMode, const float* borderValue, hipStream_t stream)
{
typedef void (*func_t)(const PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<D> dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, hipStream_t stream);
static const func_t funcs[] =
{
Filter2DCaller<T, D, BrdConstant>::call,
Filter2DCaller<T, D, BrdReplicate>::call,
Filter2DCaller<T, D, BrdReflect>::call,
Filter2DCaller<T, D, BrdWrap>::call,
Filter2DCaller<T, D, BrdReflect101>::call
};
if (stream == 0)
cudaSafeCall( hipMemcpyToSymbol(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, hipMemcpyDeviceToDevice) );
else
cudaSafeCall( hipMemcpyToSymbolAsync(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, hipMemcpyDeviceToDevice, stream) );
funcs[borderMode](static_cast< PtrStepSz<T> >(srcWhole), ofsX, ofsY, static_cast< PtrStepSz<D> >(dst), kWidth, kHeight, anchorX, anchorY, borderValue, stream);
}
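// Usage sketch (hypothetical caller; variable names are assumptions).
// borderMode indexes funcs[] above: 0 = BrdConstant, 1 = BrdReplicate,
// 2 = BrdReflect, 3 = BrdWrap, 4 = BrdReflect101. `kernel` must point to
// DEVICE memory (the constant-memory copy above is DeviceToDevice) and
// hold at most FILTER2D_MAX_KERNEL_SIZE^2 floats:
//   filter2D_gpu<uchar, uchar>(srcWhole, ofsX, ofsY, dst,
//                              3, 3, 1, 1, d_kernel,
//                              1 /*replicate*/, borderValue, stream);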
template void filter2D_gpu<uchar, uchar>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream);
template void filter2D_gpu<uchar4, uchar4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream);
template void filter2D_gpu<ushort, ushort>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream);
template void filter2D_gpu<ushort4, ushort4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream);
template void filter2D_gpu<float, float>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream);
template void filter2D_gpu<float4, float4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, hipStream_t stream);
}
}}}
#endif // CUDA_DISABLER
| ed485992101f7d1244ab8f8fd9bff09ac412171c.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/saturate_cast.hpp"
#include "opencv2/core/cuda/border_interpolate.hpp"
namespace cv { namespace gpu { namespace cudev
{
namespace imgproc
{
#define FILTER2D_MAX_KERNEL_SIZE 16
__constant__ float c_filter2DKernel[FILTER2D_MAX_KERNEL_SIZE * FILTER2D_MAX_KERNEL_SIZE];
template <class SrcT, typename D>
__global__ void filter2D(const SrcT src, PtrStepSz<D> dst, const int kWidth, const int kHeight, const int anchorX, const int anchorY)
{
typedef typename TypeVec<float, VecTraits<D>::cn>::vec_type sum_t;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= dst.cols || y >= dst.rows)
return;
sum_t res = VecTraits<sum_t>::all(0);
int kInd = 0;
for (int i = 0; i < kHeight; ++i)
{
for (int j = 0; j < kWidth; ++j)
res = res + src(y - anchorY + i, x - anchorX + j) * c_filter2DKernel[kInd++];
}
dst(y, x) = saturate_cast<D>(res);
}
template <typename T, typename D, template <typename> class Brd> struct Filter2DCaller;
#define IMPLEMENT_FILTER2D_TEX_READER(type) \
texture< type , cudaTextureType2D, cudaReadModeElementType> tex_filter2D_ ## type (0, cudaFilterModePoint, cudaAddressModeClamp); \
struct tex_filter2D_ ## type ## _reader \
{ \
typedef type elem_type; \
typedef int index_type; \
const int xoff; \
const int yoff; \
tex_filter2D_ ## type ## _reader (int xoff_, int yoff_) : xoff(xoff_), yoff(yoff_) {} \
__device__ __forceinline__ elem_type operator ()(index_type y, index_type x) const \
{ \
return tex2D(tex_filter2D_ ## type , x + xoff, y + yoff); \
} \
}; \
template <typename D, template <typename> class Brd> struct Filter2DCaller< type , D, Brd> \
{ \
static void call(const PtrStepSz< type > srcWhole, int xoff, int yoff, PtrStepSz<D> dst, \
int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream) \
{ \
typedef typename TypeVec<float, VecTraits< type >::cn>::vec_type work_type; \
dim3 block(16, 16); \
dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y)); \
bindTexture(&tex_filter2D_ ## type , srcWhole); \
tex_filter2D_ ## type ##_reader texSrc(xoff, yoff); \
Brd<work_type> brd(dst.rows, dst.cols, VecTraits<work_type>::make(borderValue)); \
BorderReader< tex_filter2D_ ## type ##_reader, Brd<work_type> > brdSrc(texSrc, brd); \
filter2D<<<grid, block, 0, stream>>>(brdSrc, dst, kWidth, kHeight, anchorX, anchorY); \
cudaSafeCall( cudaGetLastError() ); \
if (stream == 0) \
cudaSafeCall( cudaDeviceSynchronize() ); \
} \
};
IMPLEMENT_FILTER2D_TEX_READER(uchar);
IMPLEMENT_FILTER2D_TEX_READER(uchar4);
IMPLEMENT_FILTER2D_TEX_READER(ushort);
IMPLEMENT_FILTER2D_TEX_READER(ushort4);
IMPLEMENT_FILTER2D_TEX_READER(float);
IMPLEMENT_FILTER2D_TEX_READER(float4);
#undef IMPLEMENT_FILTER2D_TEX_READER
template <typename T, typename D>
void filter2D_gpu(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst,
int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel,
int borderMode, const float* borderValue, cudaStream_t stream)
{
typedef void (*func_t)(const PtrStepSz<T> srcWhole, int xoff, int yoff, PtrStepSz<D> dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* borderValue, cudaStream_t stream);
static const func_t funcs[] =
{
Filter2DCaller<T, D, BrdConstant>::call,
Filter2DCaller<T, D, BrdReplicate>::call,
Filter2DCaller<T, D, BrdReflect>::call,
Filter2DCaller<T, D, BrdWrap>::call,
Filter2DCaller<T, D, BrdReflect101>::call
};
if (stream == 0)
cudaSafeCall( cudaMemcpyToSymbol(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, cudaMemcpyDeviceToDevice) );
else
cudaSafeCall( cudaMemcpyToSymbolAsync(c_filter2DKernel, kernel, kWidth * kHeight * sizeof(float), 0, cudaMemcpyDeviceToDevice, stream) );
funcs[borderMode](static_cast< PtrStepSz<T> >(srcWhole), ofsX, ofsY, static_cast< PtrStepSz<D> >(dst), kWidth, kHeight, anchorX, anchorY, borderValue, stream);
}
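// Usage sketch (hypothetical caller; variable names are assumptions).
// borderMode indexes funcs[] above: 0 = BrdConstant, 1 = BrdReplicate,
// 2 = BrdReflect, 3 = BrdWrap, 4 = BrdReflect101. `kernel` must point to
// DEVICE memory (the constant-memory copy above is DeviceToDevice) and
// hold at most FILTER2D_MAX_KERNEL_SIZE^2 floats:
//   filter2D_gpu<uchar, uchar>(srcWhole, ofsX, ofsY, dst,
//                              3, 3, 1, 1, d_kernel,
//                              1 /*replicate*/, borderValue, stream);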
template void filter2D_gpu<uchar, uchar>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
template void filter2D_gpu<uchar4, uchar4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
template void filter2D_gpu<ushort, ushort>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
template void filter2D_gpu<ushort4, ushort4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
template void filter2D_gpu<float, float>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
template void filter2D_gpu<float4, float4>(PtrStepSzb srcWhole, int ofsX, int ofsY, PtrStepSzb dst, int kWidth, int kHeight, int anchorX, int anchorY, const float* kernel, int borderMode, const float* borderValue, cudaStream_t stream);
}
}}}
#endif // CUDA_DISABLER
|
32ad451415fb56ec712eedca70a6d2e71ce5f5d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace EfficientShared {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
#define blockSize 32
#define MAX_BLOCK_SIZE 32
#define MAX_CHUNK_SIZE 32
#define checkCUDAErrorWithLine(msg) ((void)0)
//checkCUDAError(msg, __LINE__)
#define USE_CUDA_DEV_SYNC 0
/* Macros below adapted from:
* https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html/
* Example 39-3 */
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
// NOTE: the GPU Gems 39-3 macro reads ((n) >> NUM_BANKS + (n) >> ...),
// which parses as (n) >> (NUM_BANKS + (n)) due to '>>' precedence and
// also shifts a 32-bit int by >= 32 bits; the standard padding below
// avoids both problems while still spreading accesses across banks.
#define CONFLICT_FREE_OFFSET(n) \
((n) >> LOG_NUM_BANKS)
__global__ void scanChunk(int n, int *g_odata, const int *g_idata) {
extern __shared__ int sharedBuf[]; // int, not float: the scan sums integer data
int idx = threadIdx.x;
int offset = 1;
// compute indices and offsets to write to banks without conflict
int aIdx = idx;
int bIdx = idx + n / 2;
int bankOffsetA = CONFLICT_FREE_OFFSET(aIdx);
int bankOffsetB = CONFLICT_FREE_OFFSET(bIdx);
sharedBuf[aIdx + bankOffsetA] = g_idata[aIdx];
sharedBuf[bIdx + bankOffsetB] = g_idata[bIdx];
// begin up-sweep
for (int d = n / 2; d > 0; d /= 2) {
__syncthreads();
if (idx < d) {
aIdx = offset * (2 * idx + 1) - 1;
bIdx = aIdx + offset;//offset * (2 * idx + 2) - 1;
aIdx += CONFLICT_FREE_OFFSET(aIdx);
bIdx += CONFLICT_FREE_OFFSET(bIdx);
sharedBuf[bIdx] += sharedBuf[aIdx];
}
offset *= 2;
}
// set last idx to 0
if (idx == 0) {
sharedBuf[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d < n; d *= 2) {
offset /= 2;
__syncthreads();
if (idx < d) {
aIdx = offset * (2 * idx + 1) - 1;
bIdx = aIdx + offset;//offset * (2 * idx + 2) - 1;
aIdx += CONFLICT_FREE_OFFSET(aIdx);
bIdx += CONFLICT_FREE_OFFSET(bIdx);
int originalNodeValue = sharedBuf[aIdx];
sharedBuf[aIdx] = sharedBuf[bIdx];
sharedBuf[bIdx] += originalNodeValue;
}
}
__syncthreads();
// Recompute the original element indices: the sweep loops above clobbered
// aIdx/bIdx, so writing through them would scatter the results.
aIdx = idx;
bIdx = idx + n / 2;
g_odata[aIdx] = sharedBuf[aIdx + bankOffsetA];
g_odata[bIdx] = sharedBuf[bIdx + bankOffsetB];
}
/**
* Performs an exclusive prefix-sum (scan) of idata into odata.
* Both pointers are host arrays: the data is copied to the device,
* scanned by a single work-efficient block, and copied back.
* n is padded up to the next power of two internally.
*/
void scan(int n, int *odata, const int *idata) {
if (n == 1) {
odata[0] = 0;
return;
}
// TODO: handle n <= 2 ???
// nearest power of two
const int bufSize = 1 << ilog2ceil(n);
int *dev_buf;
hipMalloc((void**)&dev_buf, bufSize * sizeof(int));
checkCUDAErrorWithLine("malloc dev_buf error!!!");
if (n < bufSize) {
hipMemset(dev_buf + n, 0, (bufSize - n) * sizeof(int));
checkCUDAErrorWithLine("memset dev_buf to 0 error!!!");
}
hipMemcpy(dev_buf, idata, n * sizeof(int), hipMemcpyHostToDevice);
checkCUDAErrorWithLine("memcpy dev_buf error!!!");
timer().startGpuTimer();
hipLaunchKernelGGL(( scanChunk), dim3(dim3(1)), dim3(bufSize / 2), (bufSize + (bufSize >> LOG_NUM_BANKS)) * sizeof(int), 0, bufSize, dev_buf, dev_buf); // extra room for bank-conflict padding
checkCUDAErrorWithLine("scan chunk error!!!");
timer().endGpuTimer();
hipMemcpy(odata, dev_buf, n * sizeof(int), hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy dev_buf to host error!!!");
hipFree(dev_buf);
checkCUDAErrorWithLine("free dev_buf error!!!");
}
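/* Example (host-side sketch; odata/idata are ordinary host arrays):
 *   int idata[8] = {3, 1, 7, 0, 4, 1, 6, 3};
 *   int odata[8];
 *   StreamCompaction::EfficientShared::scan(8, odata, idata);
 *   // odata == {0, 3, 4, 11, 11, 15, 16, 22}  (exclusive prefix sum)
 */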
}
}
| 32ad451415fb56ec712eedca70a6d2e71ce5f5d5.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace EfficientShared {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
#define blockSize 32
#define MAX_BLOCK_SIZE 32
#define MAX_CHUNK_SIZE 32
#define checkCUDAErrorWithLine(msg) ((void)0)
//checkCUDAError(msg, __LINE__)
#define USE_CUDA_DEV_SYNC 0
/* Macros below adapted from:
* https://developer.nvidia.com/gpugems/GPUGems3/gpugems3_ch39.html/
* Example 39-3 */
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
// NOTE: the GPU Gems 39-3 macro reads ((n) >> NUM_BANKS + (n) >> ...),
// which parses as (n) >> (NUM_BANKS + (n)) due to '>>' precedence and
// also shifts a 32-bit int by >= 32 bits; the standard padding below
// avoids both problems while still spreading accesses across banks.
#define CONFLICT_FREE_OFFSET(n) \
((n) >> LOG_NUM_BANKS)
__global__ void scanChunk(int n, int *g_odata, const int *g_idata) {
extern __shared__ int sharedBuf[]; // int, not float: the scan sums integer data
int idx = threadIdx.x;
int offset = 1;
// compute indices and offsets to write to banks without conflict
int aIdx = idx;
int bIdx = idx + n / 2;
int bankOffsetA = CONFLICT_FREE_OFFSET(aIdx);
int bankOffsetB = CONFLICT_FREE_OFFSET(bIdx);
sharedBuf[aIdx + bankOffsetA] = g_idata[aIdx];
sharedBuf[bIdx + bankOffsetB] = g_idata[bIdx];
// begin up-sweep
for (int d = n / 2; d > 0; d /= 2) {
__syncthreads();
if (idx < d) {
aIdx = offset * (2 * idx + 1) - 1;
bIdx = aIdx + offset;//offset * (2 * idx + 2) - 1;
aIdx += CONFLICT_FREE_OFFSET(aIdx);
bIdx += CONFLICT_FREE_OFFSET(bIdx);
sharedBuf[bIdx] += sharedBuf[aIdx];
}
offset *= 2;
}
// set last idx to 0
if (idx == 0) {
sharedBuf[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
}
for (int d = 1; d < n; d *= 2) {
offset /= 2;
__syncthreads();
if (idx < d) {
aIdx = offset * (2 * idx + 1) - 1;
bIdx = aIdx + offset;//offset * (2 * idx + 2) - 1;
aIdx += CONFLICT_FREE_OFFSET(aIdx);
bIdx += CONFLICT_FREE_OFFSET(bIdx);
int originalNodeValue = sharedBuf[aIdx];
sharedBuf[aIdx] = sharedBuf[bIdx];
sharedBuf[bIdx] += originalNodeValue;
}
}
__syncthreads();
// Recompute the original element indices: the sweep loops above clobbered
// aIdx/bIdx, so writing through them would scatter the results.
aIdx = idx;
bIdx = idx + n / 2;
g_odata[aIdx] = sharedBuf[aIdx + bankOffsetA];
g_odata[bIdx] = sharedBuf[bIdx + bankOffsetB];
}
/**
* Performs an exclusive prefix-sum (scan) of idata into odata.
* Both pointers are host arrays: the data is copied to the device,
* scanned by a single work-efficient block, and copied back.
* n is padded up to the next power of two internally.
*/
void scan(int n, int *odata, const int *idata) {
if (n == 1) {
odata[0] = 0;
return;
}
// TODO: handle n <= 2 ???
// nearest power of two
const int bufSize = 1 << ilog2ceil(n);
int *dev_buf;
cudaMalloc((void**)&dev_buf, bufSize * sizeof(int));
checkCUDAErrorWithLine("malloc dev_buf error!!!");
if (n < bufSize) {
cudaMemset(dev_buf + n, 0, (bufSize - n) * sizeof(int));
checkCUDAErrorWithLine("memset dev_buf to 0 error!!!");
}
cudaMemcpy(dev_buf, idata, n * sizeof(int), cudaMemcpyHostToDevice);
checkCUDAErrorWithLine("memcpy dev_buf error!!!");
timer().startGpuTimer();
scanChunk<<<dim3(1), bufSize / 2, (bufSize + (bufSize >> LOG_NUM_BANKS)) * sizeof(int)>>>(bufSize, dev_buf, dev_buf); // extra room for bank-conflict padding
checkCUDAErrorWithLine("scan chunk error!!!");
timer().endGpuTimer();
cudaMemcpy(odata, dev_buf, n * sizeof(int), cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy dev_buf to host error!!!");
cudaFree(dev_buf);
checkCUDAErrorWithLine("free dev_buf error!!!");
}
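/* Example (host-side sketch; odata/idata are ordinary host arrays):
 *   int idata[8] = {3, 1, 7, 0, 4, 1, 6, 3};
 *   int odata[8];
 *   StreamCompaction::EfficientShared::scan(8, odata, idata);
 *   // odata == {0, 3, 4, 11, 11, 15, 16, 22}  (exclusive prefix sum)
 */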
}
}
|
cb6a02d90e2cdb53fd69dadcac13ae6ac2cdcc9c.hip | // !!! This is a file automatically generated by hipify!!!
#include <gauge_field.h>
#include <color_spinor_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_domain_wall_5d.cuh>
/**
This is the gauged domain-wall 5-d preconditioned operator.
*/
namespace quda
{
/**
@brief This is a helper class that is used to instantiate the
correct templated kernel for the dslash.
*/
template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg>
struct DomainWall5DLaunch {
static constexpr const char *kernel = "quda::domainWall5DGPU"; // kernel name for jit compilation
template <typename Dslash>
inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const hipStream_t &stream)
{
dslash.launch(domainWall5DGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp, arg, stream);
}
};
template <typename Float, int nDim, int nColor, typename Arg> class DomainWall5D : public Dslash<Float>
{
protected:
Arg &arg;
const ColorSpinorField &in;
public:
DomainWall5D(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash<Float>(arg, out, in, "kernels/dslash_domain_wall_5d.cuh"),
arg(arg),
in(in)
{
TunableVectorYZ::resizeVector(in.X(4), arg.nParity);
}
virtual ~DomainWall5D() {}
void apply(const hipStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash<Float>::setParam(arg);
Dslash<Float>::template instantiate<DomainWall5DLaunch, nDim, nColor>(tp, arg, stream);
}
long long flops() const
{
long long flops = Dslash<Float>::flops();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL: break; // 5-d flops are in the interior kernel
case INTERIOR_KERNEL:
case KERNEL_POLICY:
int Ls = in.X(4);
long long bulk = (Ls - 2) * (in.Volume() / Ls);
long long wall = 2 * (in.Volume() / Ls);
flops += 96ll * bulk + 120ll * wall;
break;
}
return flops;
}
long long bytes() const
{
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int spinor_bytes = 2 * in.Ncolor() * in.Nspin() * in.Precision() + (isFixed ? sizeof(float) : 0);
long long bytes = Dslash<Float>::bytes();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL: break;
case INTERIOR_KERNEL:
case KERNEL_POLICY: bytes += 2 * spinor_bytes * in.VolumeCB(); break;
}
return bytes;
}
TuneKey tuneKey() const
{
return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]);
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct DomainWall5DApply {
inline DomainWall5DApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a,
double m_f, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 5;
DomainWall5DArg<Float, nColor, recon> arg(out, in, U, a, m_f, a != 0.0, x, parity, dagger, comm_override);
DomainWall5D<Float, nDim, nColor, DomainWall5DArg<Float, nColor, recon>> twisted(arg, out, in);
dslash::DslashPolicyTune<decltype(twisted)> policy(twisted,
const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)),
in.getDslashConstant().volume_4d_cb, in.getDslashConstant().ghostFaceCB, profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the 5-d preconditioned domain-wall Dslash operator
// out(x) = M*in = in(x) + a*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
void ApplyDomainWall5D(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a, double m_f,
const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_DOMAIN_WALL_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, x, U);
// check all locations match
checkLocation(out, in, x, U);
// with 5-d checkerboarding we must use kernel packing
pushKernelPackT(true);
instantiate<DomainWall5DApply>(out, in, U, a, m_f, x, parity, dagger, comm_override, profile);
popKernelPackT();
#else
errorQuda("Domain-wall dslash has not been built");
#endif // GPU_DOMAIN_WALL_DIRAC
}
} // namespace quda
| cb6a02d90e2cdb53fd69dadcac13ae6ac2cdcc9c.cu | #include <gauge_field.h>
#include <color_spinor_field.h>
#include <dslash.h>
#include <worker.h>
#include <dslash_policy.cuh>
#include <kernels/dslash_domain_wall_5d.cuh>
/**
This is the gauged domain-wall 5-d preconditioned operator.
*/
namespace quda
{
/**
@brief This is a helper class that is used to instantiate the
correct templated kernel for the dslash.
*/
template <typename Float, int nDim, int nColor, int nParity, bool dagger, bool xpay, KernelType kernel_type, typename Arg>
struct DomainWall5DLaunch {
static constexpr const char *kernel = "quda::domainWall5DGPU"; // kernel name for jit compilation
template <typename Dslash>
inline static void launch(Dslash &dslash, TuneParam &tp, Arg &arg, const cudaStream_t &stream)
{
dslash.launch(domainWall5DGPU<Float, nDim, nColor, nParity, dagger, xpay, kernel_type, Arg>, tp, arg, stream);
}
};
template <typename Float, int nDim, int nColor, typename Arg> class DomainWall5D : public Dslash<Float>
{
protected:
Arg &arg;
const ColorSpinorField &in;
public:
DomainWall5D(Arg &arg, const ColorSpinorField &out, const ColorSpinorField &in) :
Dslash<Float>(arg, out, in, "kernels/dslash_domain_wall_5d.cuh"),
arg(arg),
in(in)
{
TunableVectorYZ::resizeVector(in.X(4), arg.nParity);
}
virtual ~DomainWall5D() {}
void apply(const cudaStream_t &stream)
{
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
Dslash<Float>::setParam(arg);
Dslash<Float>::template instantiate<DomainWall5DLaunch, nDim, nColor>(tp, arg, stream);
}
long long flops() const
{
long long flops = Dslash<Float>::flops();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL: break; // 5-d flops are in the interior kernel
case INTERIOR_KERNEL:
case KERNEL_POLICY:
int Ls = in.X(4);
long long bulk = (Ls - 2) * (in.Volume() / Ls);
long long wall = 2 * (in.Volume() / Ls);
flops += 96ll * bulk + 120ll * wall;
break;
}
return flops;
}
long long bytes() const
{
bool isFixed = (in.Precision() == sizeof(short) || in.Precision() == sizeof(char)) ? true : false;
int spinor_bytes = 2 * in.Ncolor() * in.Nspin() * in.Precision() + (isFixed ? sizeof(float) : 0);
long long bytes = Dslash<Float>::bytes();
switch (arg.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL: break;
case INTERIOR_KERNEL:
case KERNEL_POLICY: bytes += 2 * spinor_bytes * in.VolumeCB(); break;
}
return bytes;
}
TuneKey tuneKey() const
{
return TuneKey(in.VolString(), typeid(*this).name(), Dslash<Float>::aux[arg.kernel_type]);
}
};
template <typename Float, int nColor, QudaReconstructType recon> struct DomainWall5DApply {
inline DomainWall5DApply(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a,
double m_f, const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile)
{
constexpr int nDim = 5;
DomainWall5DArg<Float, nColor, recon> arg(out, in, U, a, m_f, a != 0.0, x, parity, dagger, comm_override);
DomainWall5D<Float, nDim, nColor, DomainWall5DArg<Float, nColor, recon>> twisted(arg, out, in);
dslash::DslashPolicyTune<decltype(twisted)> policy(twisted,
const_cast<cudaColorSpinorField *>(static_cast<const cudaColorSpinorField *>(&in)),
in.getDslashConstant().volume_4d_cb, in.getDslashConstant().ghostFaceCB, profile);
policy.apply(0);
checkCudaError();
}
};
// Apply the 5-d preconditioned domain-wall Dslash operator
// out(x) = M*in = in(x) + a*\sum_mu U_{-\mu}(x)in(x+mu) + U^\dagger_mu(x-mu)in(x-mu)
void ApplyDomainWall5D(ColorSpinorField &out, const ColorSpinorField &in, const GaugeField &U, double a, double m_f,
const ColorSpinorField &x, int parity, bool dagger, const int *comm_override, TimeProfile &profile)
{
#ifdef GPU_DOMAIN_WALL_DIRAC
if (in.V() == out.V()) errorQuda("Aliasing pointers");
if (in.FieldOrder() != out.FieldOrder())
errorQuda("Field order mismatch in = %d, out = %d", in.FieldOrder(), out.FieldOrder());
// check all precisions match
checkPrecision(out, in, x, U);
// check all locations match
checkLocation(out, in, x, U);
// with 5-d checkerboarding we must use kernel packing
pushKernelPackT(true);
instantiate<DomainWall5DApply>(out, in, U, a, m_f, x, parity, dagger, comm_override, profile);
popKernelPackT();
#else
errorQuda("Domain-wall dslash has not been built");
#endif // GPU_DOMAIN_WALL_DIRAC
}
} // namespace quda
|
df2368b669341273d92dcdf6f4082965e8a4d550.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BlockDim = 32x16
// GridDim = (w/32) x (h/16)
extern "C" __global__ void InterleaveUV( unsigned char *yuv_cb, unsigned char *yuv_cr, unsigned char *nv12_chroma,
int chroma_width, int chroma_height, int cb_pitch, int cr_pitch, int nv12_pitch )
{
int x,y;
unsigned char *pCb;
unsigned char *pCr;
unsigned char *pDst;
x = blockIdx.x*blockDim.x+threadIdx.x;
y = blockIdx.y*blockDim.y+threadIdx.y;
if ((x < chroma_width) && (y < chroma_height))
{
pCb = yuv_cb + (y*cb_pitch);
pCr = yuv_cr + (y*cr_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x << 1] = pCb[x];
pDst[(x << 1) + 1] = pCr[x];
}
}
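// Launch sketch (host side, matching the 32x16 block comment above;
// the device pointer names are assumptions):
//   dim3 block(32, 16);
//   dim3 grid((chroma_width + 31) / 32, (chroma_height + 15) / 16);
//   hipLaunchKernelGGL(InterleaveUV, grid, block, 0, 0,
//                      d_cb, d_cr, d_nv12_uv, chroma_width, chroma_height,
//                      cb_pitch, cr_pitch, nv12_pitch);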
// Simple NV12 bi-linear scaling using 2D textures
//
// blockDim {64,1,1}
texture<unsigned char, 2> luma_tex;
texture<uchar2, 2> chroma_tex;
typedef struct {
uchar2 uv0;
uchar2 uv1;
} uvpair_t;
extern "C" __global__ void Scale_Bilinear_NV12(unsigned char *dst, int dst_uv_offset,
int dst_width, int dst_height, int dst_pitch,
float left, float right,
float x_offset, float y_offset, float xc_offset, float yc_offset, float x_scale, float y_scale)
{
unsigned char *dsty, *dstuv;
uchar4 tmp0, tmp1;
uvpair_t tmp2;
int y0, tx;
float x, yt, yb, yc, leftuv, rightuv;
tx = (blockIdx.x << 8) + threadIdx.x * 4;
if (tx < dst_width)
{
y0 = blockIdx.y << 1;
// Luma
dsty = dst + __umul24(y0, dst_pitch);
yt = y_offset + (y0 + 0) * y_scale;
yb = y_offset + (y0 + 1) * y_scale;
x = 0.5f + fminf(fmaxf(x_offset + (tx + 0) * x_scale, left), right);
tmp0.x = tex2D(luma_tex, x, yt);
tmp1.x = tex2D(luma_tex, x, yb);
x = 0.5f + fminf(fmaxf(x_offset + (tx + 1) * x_scale, left), right);
tmp0.y = tex2D(luma_tex, x, yt);
tmp1.y = tex2D(luma_tex, x, yb);
x = 0.5f + fminf(fmaxf(x_offset + (tx + 2) * x_scale, left), right);
tmp0.z = tex2D(luma_tex, x, yt);
tmp1.z = tex2D(luma_tex, x, yb);
x = 0.5f + fminf(fmaxf(x_offset + (tx + 3) * x_scale, left), right);
tmp0.w = tex2D(luma_tex, x, yt);
tmp1.w = tex2D(luma_tex, x, yb);
*(uchar4 *)(dsty + tx) = tmp0;
*(uchar4 *)(dsty + tx + dst_pitch) = tmp1;
// Chroma
dstuv = dst + dst_uv_offset + __umul24(blockIdx.y, dst_pitch);
leftuv = 0.5f + 0.5f*left;
rightuv = 0.5f*(right + 1.0f - left) - 1.0f;
yc = yc_offset + (y0 >> 1) * y_scale;
x = leftuv + fminf(fmaxf(xc_offset + (tx >> 1) * x_scale - left, 0.0f), rightuv);
tmp2.uv0 = tex2D(chroma_tex, x, yc);
x = leftuv + fminf(fmaxf(xc_offset + ((tx + 2) >> 1) * x_scale - left, 0.0f), rightuv);
tmp2.uv1 = tex2D(chroma_tex, x, yc);
*(uvpair_t *)(dstuv + tx) = tmp2;
}
}
| df2368b669341273d92dcdf6f4082965e8a4d550.cu |
// BlockDim = 32x16
// GridDim = (w/32) x (h/16)
extern "C" __global__ void InterleaveUV( unsigned char *yuv_cb, unsigned char *yuv_cr, unsigned char *nv12_chroma,
int chroma_width, int chroma_height, int cb_pitch, int cr_pitch, int nv12_pitch )
{
int x,y;
unsigned char *pCb;
unsigned char *pCr;
unsigned char *pDst;
x = blockIdx.x*blockDim.x+threadIdx.x;
y = blockIdx.y*blockDim.y+threadIdx.y;
if ((x < chroma_width) && (y < chroma_height))
{
pCb = yuv_cb + (y*cb_pitch);
pCr = yuv_cr + (y*cr_pitch);
pDst = nv12_chroma + y*nv12_pitch;
pDst[x << 1] = pCb[x];
pDst[(x << 1) + 1] = pCr[x];
}
}
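// Launch sketch (host side, matching the 32x16 block comment above;
// the device pointer names are assumptions):
//   dim3 block(32, 16);
//   dim3 grid((chroma_width + 31) / 32, (chroma_height + 15) / 16);
//   InterleaveUV<<<grid, block>>>(d_cb, d_cr, d_nv12_uv,
//                                 chroma_width, chroma_height,
//                                 cb_pitch, cr_pitch, nv12_pitch);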
// Simple NV12 bi-linear scaling using 2D textures
//
// blockDim {64,1,1}
texture<unsigned char, 2> luma_tex;
texture<uchar2, 2> chroma_tex;
typedef struct {
uchar2 uv0;
uchar2 uv1;
} uvpair_t;
extern "C" __global__ void Scale_Bilinear_NV12(unsigned char *dst, int dst_uv_offset,
int dst_width, int dst_height, int dst_pitch,
float left, float right,
float x_offset, float y_offset, float xc_offset, float yc_offset, float x_scale, float y_scale)
{
unsigned char *dsty, *dstuv;
uchar4 tmp0, tmp1;
uvpair_t tmp2;
int y0, tx;
float x, yt, yb, yc, leftuv, rightuv;
tx = (blockIdx.x << 8) + threadIdx.x * 4;
if (tx < dst_width)
{
y0 = blockIdx.y << 1;
// Luma
dsty = dst + __umul24(y0, dst_pitch);
yt = y_offset + (y0 + 0) * y_scale;
yb = y_offset + (y0 + 1) * y_scale;
x = 0.5f + fminf(fmaxf(x_offset + (tx + 0) * x_scale, left), right);
tmp0.x = tex2D(luma_tex, x, yt);
tmp1.x = tex2D(luma_tex, x, yb);
x = 0.5f + fminf(fmaxf(x_offset + (tx + 1) * x_scale, left), right);
tmp0.y = tex2D(luma_tex, x, yt);
tmp1.y = tex2D(luma_tex, x, yb);
x = 0.5f + fminf(fmaxf(x_offset + (tx + 2) * x_scale, left), right);
tmp0.z = tex2D(luma_tex, x, yt);
tmp1.z = tex2D(luma_tex, x, yb);
x = 0.5f + fminf(fmaxf(x_offset + (tx + 3) * x_scale, left), right);
tmp0.w = tex2D(luma_tex, x, yt);
tmp1.w = tex2D(luma_tex, x, yb);
*(uchar4 *)(dsty + tx) = tmp0;
*(uchar4 *)(dsty + tx + dst_pitch) = tmp1;
// Chroma
dstuv = dst + dst_uv_offset + __umul24(blockIdx.y, dst_pitch);
leftuv = 0.5f + 0.5f*left;
rightuv = 0.5f*(right + 1.0f - left) - 1.0f;
yc = yc_offset + (y0 >> 1) * y_scale;
x = leftuv + fminf(fmaxf(xc_offset + (tx >> 1) * x_scale - left, 0.0f), rightuv);
tmp2.uv0 = tex2D(chroma_tex, x, yc);
x = leftuv + fminf(fmaxf(xc_offset + ((tx + 2) >> 1) * x_scale - left, 0.0f), rightuv);
tmp2.uv1 = tex2D(chroma_tex, x, yc);
*(uvpair_t *)(dstuv + tx) = tmp2;
}
}
|
6e2cfc18aefa04c3c72789bc666bab583f843d32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <[email protected]>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*
* Modified by VinInn for testing math funcs
*/
/* to run test
foreach f ( $CMSSW_BASE/test/$SCRAM_ARCH/DFM_Vector* )
echo $f; $f
end
*/
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#include "DataFormats/Math/interface/approx_atan2.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
#include "HeterogeneousCore/CUDAUtilities/interface/launch.h"
constexpr float xmin = -100.001; // avoid 0
constexpr float incr = 0.04;
constexpr int Nsteps = 2. * std::abs(xmin) / incr;
template <int DEGREE>
__global__ void diffAtan(int *diffs) {
auto mdiff = &diffs[0];
auto idiff = &diffs[1];
auto sdiff = &diffs[2];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
auto x = xmin + incr * i;
auto y = xmin + incr * j;
auto approx = unsafe_atan2f<DEGREE>(y, x);
auto iapprox = unsafe_atan2i<DEGREE>(y, x);
auto sapprox = unsafe_atan2s<DEGREE>(y, x);
auto std = std::atan2(y, x);
auto fd = std::abs(std - approx);
atomicMax(mdiff, int(fd * 1.e7));
atomicMax(idiff, std::abs(phi2int(std) - iapprox));
short dd = std::abs(phi2short(std) - sapprox);
atomicMax(sdiff, int(dd));
}
template <int DEGREE>
void go() {
auto start = std::chrono::high_resolution_clock::now();
auto delta = start - start;
// atan2
delta -= (std::chrono::high_resolution_clock::now() - start);
auto diff_d = cms::cuda::make_device_unique<int[]>(3, nullptr);
int diffs[3];
cudaCheck(hipMemset(diff_d.get(), 0, 3 * 4));
// Launch the diff CUDA Kernel
dim3 threadsPerBlock(32, 32, 1);
dim3 blocksPerGrid(
(Nsteps + threadsPerBlock.x - 1) / threadsPerBlock.x, (Nsteps + threadsPerBlock.y - 1) / threadsPerBlock.y, 1);
std::cout << "CUDA kernel 'diff' launch with " << blocksPerGrid.x << " blocks of " << threadsPerBlock.y
<< " threads\n";
cms::cuda::launch(diffAtan<DEGREE>, {blocksPerGrid, threadsPerBlock}, diff_d.get());
cudaCheck(hipMemcpy(diffs, diff_d.get(), 3 * 4, hipMemcpyDeviceToHost));
delta += (std::chrono::high_resolution_clock::now() - start);
float mdiff = diffs[0] * 1.e-7;
int idiff = diffs[1];
int sdiff = diffs[2];
std::cout << "for degree " << DEGREE << " max diff is " << mdiff << ' ' << idiff << ' ' << int2phi(idiff) << ' '
<< sdiff << ' ' << short2phi(sdiff) << std::endl;
std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
}
int main() {
cms::cudatest::requireDevices();
try {
go<3>();
go<5>();
go<7>();
go<9>();
} catch (std::runtime_error &ex) {
std::cerr << "CUDA or std runtime error: " << ex.what() << std::endl;
exit(EXIT_FAILURE);
} catch (...) {
std::cerr << "A non-CUDA error occurred" << std::endl;
exit(EXIT_FAILURE);
}
return EXIT_SUCCESS;
}
| 6e2cfc18aefa04c3c72789bc666bab583f843d32.cu | /**
* Derived from the nVIDIA CUDA 8.0 samples by
*
* Eyal Rozenberg <[email protected]>
*
* The derivation is specifically permitted in the nVIDIA CUDA Samples EULA
* and the deriver is the owner of this code according to the EULA.
*
* Use this reasonably. If you want to discuss licensing formalities, please
* contact the author.
*
* Modified by VinInn for testing math funcs
*/
/* to run test
foreach f ( $CMSSW_BASE/test/$SCRAM_ARCH/DFM_Vector* )
echo $f; $f
end
*/
#include <algorithm>
#include <cassert>
#include <chrono>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <stdexcept>
#include "DataFormats/Math/interface/approx_atan2.h"
#include "HeterogeneousCore/CUDAUtilities/interface/device_unique_ptr.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
#include "HeterogeneousCore/CUDAUtilities/interface/launch.h"
constexpr float xmin = -100.001; // avoid 0
constexpr float incr = 0.04;
constexpr int Nsteps = 2. * std::abs(xmin) / incr;
template <int DEGREE>
__global__ void diffAtan(int *diffs) {
auto mdiff = &diffs[0];
auto idiff = &diffs[1];
auto sdiff = &diffs[2];
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
auto x = xmin + incr * i;
auto y = xmin + incr * j;
auto approx = unsafe_atan2f<DEGREE>(y, x);
auto iapprox = unsafe_atan2i<DEGREE>(y, x);
auto sapprox = unsafe_atan2s<DEGREE>(y, x);
auto std = std::atan2(y, x);
auto fd = std::abs(std - approx);
atomicMax(mdiff, int(fd * 1.e7));
atomicMax(idiff, std::abs(phi2int(std) - iapprox));
short dd = std::abs(phi2short(std) - sapprox);
atomicMax(sdiff, int(dd));
}
template <int DEGREE>
void go() {
auto start = std::chrono::high_resolution_clock::now();
auto delta = start - start;
// atan2
delta -= (std::chrono::high_resolution_clock::now() - start);
auto diff_d = cms::cuda::make_device_unique<int[]>(3, nullptr);
int diffs[3];
cudaCheck(cudaMemset(diff_d.get(), 0, 3 * 4));
// Launch the diff CUDA Kernel
dim3 threadsPerBlock(32, 32, 1);
dim3 blocksPerGrid(
(Nsteps + threadsPerBlock.x - 1) / threadsPerBlock.x, (Nsteps + threadsPerBlock.y - 1) / threadsPerBlock.y, 1);
std::cout << "CUDA kernel 'diff' launch with " << blocksPerGrid.x << " blocks of " << threadsPerBlock.y
<< " threads\n";
cms::cuda::launch(diffAtan<DEGREE>, {blocksPerGrid, threadsPerBlock}, diff_d.get());
cudaCheck(cudaMemcpy(diffs, diff_d.get(), 3 * 4, cudaMemcpyDeviceToHost));
delta += (std::chrono::high_resolution_clock::now() - start);
float mdiff = diffs[0] * 1.e-7;
int idiff = diffs[1];
int sdiff = diffs[2];
std::cout << "for degree " << DEGREE << " max diff is " << mdiff << ' ' << idiff << ' ' << int2phi(idiff) << ' '
<< sdiff << ' ' << short2phi(sdiff) << std::endl;
std::cout << "cuda computation took " << std::chrono::duration_cast<std::chrono::milliseconds>(delta).count() << " ms"
<< std::endl;
}
int main() {
cms::cudatest::requireDevices();
try {
go<3>();
go<5>();
go<7>();
go<9>();
} catch (std::runtime_error &ex) {
std::cerr << "CUDA or std runtime error: " << ex.what() << std::endl;
exit(EXIT_FAILURE);
} catch (...) {
std::cerr << "A non-CUDA error occurred" << std::endl;
exit(EXIT_FAILURE);
}
return EXIT_SUCCESS;
}
|
60570ca40c0cfaedf2c7e363b8b15362724d8233.hip | // !!! This is a file automatically generated by hipify!!!
//*LB*
// Copyright (c) 2010, University of Bonn, Institute for Computer Science VI
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Bonn
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
#include <string>
#include <stdexcept>
#include <iostream>
#include <hip/hip_runtime.h>
/*#include <cutil_inline.h>*/
#include "cuv_general.hpp"
#include "exception_helper.hpp"
namespace cuv{
using namespace std;
void cuvAssertFailed(const char *msg){
/*cout << "cuvAssert failed: " << msg <<endl;*/
/*abort();*/
/*ExceptionTracer et;*/
throw std::runtime_error(std::string(msg));
}
void checkCudaError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
/*cout << "checkCudaError: " << msg << ": " << hipGetErrorString(err) <<endl;*/
/*abort();*/
/*ExceptionTracer et;*/
throw std::runtime_error(std::string(msg) + hipGetErrorString(err) );
}
}
void initCUDA(int dev){
// if we get a negative device, do nothing.
if (dev<0) return;
cuvSafeCall(hipSetDevice(dev));
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
/*bool canHostmap = prop.canMapHostMemory;*/
/*if(canHostmap){*/
/* cuvSafeCall(hipSetDeviceFlags(hipDeviceMapHost));*/
/*}*/
}
void exitCUDA(){
hipDeviceReset();
}
void safeThreadSync(){
hipDeviceSynchronize();
checkCudaError("Save Thread Sync");
}
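/* Typical call sequence (sketch; errors surface as std::runtime_error
 * via cuvAssert/checkCudaError above):
 *   cuv::initCUDA(0);        // select device 0 (a negative id is a no-op)
 *   // ... allocate memory and launch kernels ...
 *   cuv::safeThreadSync();   // synchronize and check for kernel errors
 *   cuv::exitCUDA();         // tear down the device context at shutdown
 */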
}
| 60570ca40c0cfaedf2c7e363b8b15362724d8233.cu | //*LB*
// Copyright (c) 2010, University of Bonn, Institute for Computer Science VI
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of the University of Bonn
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//*LE*
#include <string>
#include <stdexcept>
#include <iostream>
#include <cuda.h>
/*#include <cutil_inline.h>*/
#include "cuv_general.hpp"
#include "exception_helper.hpp"
namespace cuv{
using namespace std;
void cuvAssertFailed(const char *msg){
/*cout << "cuvAssert failed: " << msg <<endl;*/
/*abort();*/
/*ExceptionTracer et;*/
throw std::runtime_error(std::string(msg));
}
void checkCudaError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
/*cout << "checkCudaError: " << msg << ": " << cudaGetErrorString(err) <<endl;*/
/*abort();*/
/*ExceptionTracer et;*/
throw std::runtime_error(std::string(msg) + cudaGetErrorString(err) );
}
}
void initCUDA(int dev){
// if we get a negative device, do nothing.
if (dev<0) return;
cuvSafeCall(cudaSetDevice(dev));
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
/*bool canHostmap = prop.canMapHostMemory;*/
/*if(canHostmap){*/
/* cuvSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost));*/
/*}*/
}
void exitCUDA(){
cudaThreadExit();
}
void safeThreadSync(){
cudaThreadSynchronize();
checkCudaError("Save Thread Sync");
}
}
|
d5df1cabd16deb0fbfd045b5514c29f9c35b3d0d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.hpp"
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/scan.h>
#include <hip/hip_runtime.h>
#include <nvtx3/roctracer/roctx.h>
/**
* @brief Computes the size of each output row
*
* This thread is called once per row in d_names.
*
* @param d_names Column of names
* @param d_visibilities Column of visibilities
* @param d_sizes Output sizes for each row
*/
__global__ void sizes_kernel(cudf::column_device_view const d_names,
cudf::column_device_view const d_visibilities,
cudf::size_type* d_sizes)
{
// The row index is resolved from the CUDA thread/block objects
auto index = threadIdx.x + blockIdx.x * blockDim.x;
// There may be more threads than actual rows
if (index >= d_names.size()) return;
auto const visible = cudf::string_view("public", 6);
auto const redaction = cudf::string_view("X X", 3);
auto const name = d_names.element<cudf::string_view>(index);
auto const vis = d_visibilities.element<cudf::string_view>(index);
cudf::size_type result = redaction.size_bytes(); // init to redaction size
if (vis == visible) {
auto const space_idx = name.find(' ');
auto const first = name.substr(0, space_idx);
auto const last_initial = name.substr(space_idx + 1, 1);
result = first.size_bytes() + last_initial.size_bytes() + 1;
}
d_sizes[index] = result;
}
/**
* @brief Builds the output for each row
*
* This thread is called once per row in d_names.
*
* @param d_names Column of names
* @param d_visibilities Column of visibilities
* @param d_offsets Byte offset in `d_chars` for each row
* @param d_chars Output memory for all rows
*/
__global__ void redact_kernel(cudf::column_device_view const d_names,
cudf::column_device_view const d_visibilities,
cudf::size_type const* d_offsets,
char* d_chars)
{
// The row index is resolved from the CUDA thread/block objects
auto index = threadIdx.x + blockIdx.x * blockDim.x;
// There may be more threads than actual rows
if (index >= d_names.size()) return;
auto const visible = cudf::string_view("public", 6);
auto const redaction = cudf::string_view("X X", 3);
// resolve output_ptr using the offsets vector
char* output_ptr = d_chars + d_offsets[index];
auto const name = d_names.element<cudf::string_view>(index);
auto const vis = d_visibilities.element<cudf::string_view>(index);
if (vis == visible) {
auto const space_idx = name.find(' ');
auto const first = name.substr(0, space_idx);
auto const last_initial = name.substr(space_idx + 1, 1);
auto const output_size = first.size_bytes() + last_initial.size_bytes() + 1;
// build output string
memcpy(output_ptr, last_initial.data(), last_initial.size_bytes());
output_ptr += last_initial.size_bytes();
*output_ptr++ = ' ';
memcpy(output_ptr, first.data(), first.size_bytes());
} else {
memcpy(output_ptr, redaction.data(), redaction.size_bytes());
}
}
/**
* @brief Redacts each name per the corresponding visibility entry
*
* This implementation builds the strings column children (offsets and chars)
* directly into device memory for libcudf.
*
* @param names Column of names
* @param visibilities Column of visibilities
* @return Redacted column of names
*/
std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names,
cudf::column_view const& visibilities)
{
// all device memory operations and kernel functions will run on this stream
auto stream = rmm::cuda_stream_default;
auto const d_names = cudf::column_device_view::create(names, stream);
auto const d_visibilities = cudf::column_device_view::create(visibilities, stream);
constexpr int block_size = 128; // this arbitrary size should be a power of 2
int const blocks = (names.size() + block_size - 1) / block_size;
roctxRangePushA("redact_strings");
// create offsets vector
auto offsets = rmm::device_uvector<cudf::size_type>(names.size() + 1, stream);
// compute output sizes
hipLaunchKernelGGL(( sizes_kernel), dim3(blocks), dim3(block_size), 0, stream.value(),
*d_names, *d_visibilities, offsets.data());
// convert sizes to offsets (in place)
thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());
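  // e.g. row sizes [6, 3] (plus the one slack slot) scan to offsets [0, 6, 9];
  // the final element is the total output size and offsets[i+1]-offsets[i]
  // recovers each row's size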
// last element is the total output size
// (device-to-host copy of 1 integer -- includes syncing the stream)
cudf::size_type output_size = offsets.back_element(stream);
// create chars vector
auto chars = rmm::device_uvector<char>(output_size, stream);
// build chars output
hipLaunchKernelGGL(( redact_kernel), dim3(blocks), dim3(block_size), 0, stream.value(),
*d_names, *d_visibilities, offsets.data(), chars.data());
// create column from offsets and chars vectors (no copy is performed)
auto result =
cudf::make_strings_column(names.size(), std::move(offsets), std::move(chars), {}, 0);
// wait for all of the above to finish
stream.synchronize();
roctxRangePop();
return result;
}
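/* A minimal usage sketch (hypothetical inputs; cudf::test::strings_column_wrapper
   is the cudf test utility, named here as an assumption, not part of this file):
     auto names = cudf::test::strings_column_wrapper({"John Doe", "Jane Roe"});
     auto vis   = cudf::test::strings_column_wrapper({"public", "private"});
     auto out   = redact_strings(names, vis);  // rows: "D John", "X X"
*/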
| d5df1cabd16deb0fbfd045b5514c29f9c35b3d0d.cu | /*
* Copyright (c) 2022-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common.hpp"
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <rmm/cuda_stream_view.hpp>
#include <rmm/device_uvector.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include <nvtx3/nvToolsExt.h>
/**
* @brief Computes the size of each output row
*
 * This kernel is launched with one thread per row in d_names.
*
* @param d_names Column of names
* @param d_visibilities Column of visibilities
* @param d_sizes Output sizes for each row
*/
__global__ void sizes_kernel(cudf::column_device_view const d_names,
cudf::column_device_view const d_visibilities,
cudf::size_type* d_sizes)
{
// The row index is resolved from the CUDA thread/block objects
auto index = threadIdx.x + blockIdx.x * blockDim.x;
// There may be more threads than actual rows
if (index >= d_names.size()) return;
auto const visible = cudf::string_view("public", 6);
auto const redaction = cudf::string_view("X X", 3);
auto const name = d_names.element<cudf::string_view>(index);
auto const vis = d_visibilities.element<cudf::string_view>(index);
cudf::size_type result = redaction.size_bytes(); // init to redaction size
if (vis == visible) {
auto const space_idx = name.find(' ');
auto const first = name.substr(0, space_idx);
auto const last_initial = name.substr(space_idx + 1, 1);
result = first.size_bytes() + last_initial.size_bytes() + 1;
}
d_sizes[index] = result;
}
/**
* @brief Builds the output for each row
*
 * This kernel is launched with one thread per row in d_names.
*
* @param d_names Column of names
* @param d_visibilities Column of visibilities
* @param d_offsets Byte offset in `d_chars` for each row
* @param d_chars Output memory for all rows
*/
__global__ void redact_kernel(cudf::column_device_view const d_names,
cudf::column_device_view const d_visibilities,
cudf::size_type const* d_offsets,
char* d_chars)
{
// The row index is resolved from the CUDA thread/block objects
auto index = threadIdx.x + blockIdx.x * blockDim.x;
// There may be more threads than actual rows
if (index >= d_names.size()) return;
auto const visible = cudf::string_view("public", 6);
auto const redaction = cudf::string_view("X X", 3);
// resolve output_ptr using the offsets vector
char* output_ptr = d_chars + d_offsets[index];
auto const name = d_names.element<cudf::string_view>(index);
auto const vis = d_visibilities.element<cudf::string_view>(index);
if (vis == visible) {
auto const space_idx = name.find(' ');
auto const first = name.substr(0, space_idx);
auto const last_initial = name.substr(space_idx + 1, 1);
auto const output_size = first.size_bytes() + last_initial.size_bytes() + 1;
// build output string
memcpy(output_ptr, last_initial.data(), last_initial.size_bytes());
output_ptr += last_initial.size_bytes();
*output_ptr++ = ' ';
memcpy(output_ptr, first.data(), first.size_bytes());
} else {
memcpy(output_ptr, redaction.data(), redaction.size_bytes());
}
}
/**
* @brief Redacts each name per the corresponding visibility entry
*
* This implementation builds the strings column children (offsets and chars)
* directly into device memory for libcudf.
*
* @param names Column of names
* @param visibilities Column of visibilities
* @return Redacted column of names
*/
std::unique_ptr<cudf::column> redact_strings(cudf::column_view const& names,
cudf::column_view const& visibilities)
{
// all device memory operations and kernel functions will run on this stream
auto stream = rmm::cuda_stream_default;
auto const d_names = cudf::column_device_view::create(names, stream);
auto const d_visibilities = cudf::column_device_view::create(visibilities, stream);
constexpr int block_size = 128; // this arbitrary size should be a power of 2
int const blocks = (names.size() + block_size - 1) / block_size;
nvtxRangePushA("redact_strings");
// create offsets vector
auto offsets = rmm::device_uvector<cudf::size_type>(names.size() + 1, stream);
// compute output sizes
sizes_kernel<<<blocks, block_size, 0, stream.value()>>>(
*d_names, *d_visibilities, offsets.data());
// convert sizes to offsets (in place)
thrust::exclusive_scan(rmm::exec_policy(stream), offsets.begin(), offsets.end(), offsets.begin());
// last element is the total output size
// (device-to-host copy of 1 integer -- includes syncing the stream)
cudf::size_type output_size = offsets.back_element(stream);
// create chars vector
auto chars = rmm::device_uvector<char>(output_size, stream);
// build chars output
redact_kernel<<<blocks, block_size, 0, stream.value()>>>(
*d_names, *d_visibilities, offsets.data(), chars.data());
// create column from offsets and chars vectors (no copy is performed)
auto result =
cudf::make_strings_column(names.size(), std::move(offsets), std::move(chars), {}, 0);
// wait for all of the above to finish
stream.synchronize();
nvtxRangePop();
return result;
}
|
91ffb91c8da50fffe875381fcb488f78247efd20.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
constexpr float EPSILON = 1e-12;
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter;
iter.add_output(grad_input);
iter.add_input(grad);
iter.add_input(input);
iter.add_input(target);
iter.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "binary_cross_entropy_backward_out_cuda", [&] {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
});
}
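// Derivation note: for L = -(t*log(x) + (1-t)*log(1-x)) the gradient is
// dL/dx = (x - t) / (x*(1-x)); the EPSILON clamp above guards the
// denominator near x = 0 and x = 1.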
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction) {
auto grad_input = at::empty_like(input);
TensorIterator iter;
iter.add_output(grad_input);
iter.add_input(target);
iter.add_input(grad);
iter.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() {
scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0);
gpu_kernel(iter,
[inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) {
return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0);
});
});
return grad_input;
}
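// Note: `input` holds log-probabilities here, so with pointwise loss
// L = target * (log(target) - input) the gradient is simply -target,
// scaled by 1/numel() under 'mean' reduction (the `inv` factor above).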
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) {
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(loss, input, target, weight, reduction);
}
Tensor& binary_cross_entropy_out_cuda(Tensor& loss, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) {
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter;
iter.add_output(loss_squeezed);
iter.add_input(at::squeeze(input));
iter.add_input(at::squeeze(target));
iter.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "binary_cross_entropy_out_cuda", [&] {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
scalar_t log_input_val = ::log(input_val);
scalar_t log_1_minus_input_val = ::log(one - input_val);
log_input_val = ::max(log_input_val, neg_100);
log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) {
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(grad_input, grad, input, target, weight, reduction);
}
Tensor& binary_cross_entropy_backward_out_cuda(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) {
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
}} // namespace at::native
| 91ffb91c8da50fffe875381fcb488f78247efd20.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
constexpr float EPSILON = 1e-12;
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter;
iter.add_output(grad_input);
iter.add_input(grad);
iter.add_input(input);
iter.add_input(target);
iter.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "binary_cross_entropy_backward_out_cuda", [&] {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
});
}
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction) {
auto grad_input = at::empty_like(input);
TensorIterator iter;
iter.add_output(grad_input);
iter.add_input(target);
iter.add_input(grad);
iter.build();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "kl_div_backward_cuda", [&]() {
scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0);
gpu_kernel(iter,
[inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) {
return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0);
});
});
return grad_input;
}
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) {
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(loss, input, target, weight, reduction);
}
Tensor& binary_cross_entropy_out_cuda(Tensor& loss, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) {
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter;
iter.add_output(loss_squeezed);
iter.add_input(at::squeeze(input));
iter.add_input(at::squeeze(target));
iter.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
AT_SKIP_BFLOAT16_IF_NOT_ROCM(scalar_t, "binary_cross_entropy_out_cuda", [&] {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
scalar_t log_input_val = std::log(input_val);
scalar_t log_1_minus_input_val = std::log(one - input_val);
log_input_val = std::max(log_input_val, neg_100);
log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) {
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(grad_input, grad, input, target, weight, reduction);
}
Tensor& binary_cross_entropy_backward_out_cuda(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target, const Tensor& weight, int64_t reduction) {
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
}} // namespace at::native
|
f8775c92efb6dccdeeb1bef55a99316e51001013.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "cuda/deform_3d_im2col_cuda.cuh"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
// #include <THH/THH.h>
// #include <THH/THHAtomics.cuh>
// #include <THH/THHDeviceUtils.cuh>
at::Tensor
deform_conv3d_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const int kernel_d,
const int kernel_h,
const int kernel_w,
const int stride_d,
const int stride_h,
const int stride_w,
const int pad_d,
const int pad_h,
const int pad_w,
const int dilation_d,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int depth = input.size(2);
const int height = input.size(3);
const int width = input.size(4);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_d_ = weight.size(2);
const int kernel_h_ = weight.size(3);
const int kernel_w_ = weight.size(4);
const int im2col_step_ = ::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_)
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must be divisible by group(%d)", channels, channels_out, group)
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
    AT_ASSERTM(kernel_d_ == kernel_d && kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape won't match: (%d x %d x %d vs %d x %d x %d).",
               kernel_d, kernel_h, kernel_w, kernel_d_, kernel_h_, kernel_w_);
    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel * group);
const int depth_out = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
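    // e.g. depth 16, pad 1, dilation 1, kernel 3, stride 2 -> (16 + 2 - 3)/2 + 1 = 8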
auto output = at::empty({batch * depth_out * height_out * width_out, channels_out}, input.options());
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_d, kernel_h, kernel_w});
auto bias_g = bias.view({group, channels_out/group});
// define alias for easy use
const int batch_n = im2col_step_;
const int per_input_size = channels * depth * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3) * offset.size(4);
auto output_n = output.view({batch/im2col_step_, batch_n * depth_out * height_out * width_out, channels_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto columns = at::empty({channels * kernel_d * kernel_h * kernel_w, batch_n * depth_out * height_out * width_out}, input.options());
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] {
deformable_3d_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, depth, height, width,
depth_out, height_out, width_out,
kernel_d, kernel_h, kernel_w,
pad_d, pad_h, pad_w,
stride_d, stride_h, stride_w,
dilation_d, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
}));
auto columns_g = columns.view({group, channels/group * kernel_d * kernel_h * kernel_w, batch_n * depth_out * height_out * width_out});
auto output_g = output_n.select(0, n).view({batch_n * depth_out * height_out * width_out, group, channels_out/group});
for (int g = 0; g < group; ++g)
{
auto columns_gm = columns_g.select(0, g).t();
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_d * kernel_h * kernel_w}).t();
auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
output_g.select(1, g) = output_m.view({batch_n * depth_out * height_out * width_out, channels_out/group});
}
}
output = output.view({batch, depth_out, height_out, width_out, channels_out}).permute({0, 4, 1, 2, 3}).contiguous();
return output;
}
std::vector<at::Tensor> deform_conv3d_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &grad_output,
const int kernel_d,
const int kernel_h,
const int kernel_w,
const int stride_d,
const int stride_h,
const int stride_w,
const int pad_d,
const int pad_h,
const int pad_w,
const int dilation_d,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int depth = input.size(2);
const int height = input.size(3);
const int width = input.size(4);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_d_ = weight.size(2);
const int kernel_h_ = weight.size(3);
const int kernel_w_ = weight.size(4);
const int batch_ = grad_output.size(0);
const int channels_out_ = grad_output.size(1);
const int depth_out_ = grad_output.size(2);
const int height_out_ = grad_output.size(3);
const int width_out_ = grad_output.size(4);
const int im2col_step_ = ::min(im2col_step, batch);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_)
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must be divisible by group(%d)", channels, channels_out, group)
    AT_ASSERTM(kernel_d_ == kernel_d && kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape won't match: (%d x %d x %d vs %d x %d x %d).", kernel_d, kernel_h, kernel_w, kernel_d_, kernel_h_, kernel_w_);
    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel * group);
const int depth_out = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
    AT_ASSERTM(batch == batch_,
               "Input shape and grad_out batch won't match: (%d vs %d).", batch, batch_);
    AT_ASSERTM(channels_out == channels_out_,
               "Input shape and grad_out channels_out won't match: (%d vs %d).", channels_out, channels_out_);
    AT_ASSERTM(depth_out == depth_out_ && height_out == height_out_ && width_out == width_out_,
               "Input shape and grad_out shape won't match: (%d x %d x %d vs %d x %d x %d).", depth_out, height_out, width_out, depth_out_, height_out_, width_out_);
auto grad_input = at::zeros_like(input);
auto grad_offset = at::zeros_like(offset);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_d, kernel_h, kernel_w});
auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_d, kernel_h, kernel_w});
auto grad_bias_g = grad_bias.view({group, channels_out/group});
const int batch_n = im2col_step_;
const int per_input_size = channels * depth * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3) * offset.size(4);
auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, depth_out, height_out, width_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, depth_out, height_out, width_out});
auto ones = at::ones({batch_n * depth_out * height_out * width_out}, input.options());
auto columns = at::empty({channels * kernel_d * kernel_h * kernel_w, batch_n * depth_out * height_out * width_out}, input.options());
auto columns_g = columns.view({group, channels/group * kernel_d * kernel_h * kernel_w, batch_n * depth_out * height_out * width_out});
for (int g = 0; g < group; ++g)
{
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3, 4}).contiguous().view({channels_out/group, batch_n * depth_out * height_out * width_out});
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_d * kernel_h * kernel_w}).t();
columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm);
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] {
deformable_3d_col2im_coord_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data<scalar_t>(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels,
depth, height, width,
depth_out, height_out, width_out,
kernel_d, kernel_h, kernel_w,
pad_d, pad_h, pad_w,
stride_d, stride_h, stride_w,
dilation_d, dilation_h, dilation_w, deformable_group,
grad_offset.data<scalar_t>() + n * im2col_step_ * per_offset_size);
// gradient w.r.t. input data
deformable_3d_col2im_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data<scalar_t>(),
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels,
depth, height, width,
depth_out, height_out, width_out,
kernel_d, kernel_h, kernel_w,
pad_d, pad_h, pad_w,
stride_d, stride_h, stride_w,
dilation_d, dilation_h, dilation_w, deformable_group,
grad_input.data<scalar_t>() + n * im2col_step_ * per_input_size);
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
deformable_3d_im2col_cuda(at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels,
depth, height, width,
depth_out, height_out, width_out,
kernel_d, kernel_h, kernel_w,
pad_d, pad_h, pad_w,
stride_d, stride_h, stride_w,
dilation_d, dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
}));
for (int g = 0; g < group; ++g)
{
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3, 4}).contiguous().view({channels_out/group, batch_n * depth_out * height_out * width_out});
auto columns_gm = columns_g.select(0, g).t();
auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_d * kernel_h * kernel_w});
auto grad_bias_gm = grad_bias_g.select(0, g);
grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g));
grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones);
}
}
return {
grad_input, grad_offset, grad_weight, grad_bias
};
} | f8775c92efb6dccdeeb1bef55a99316e51001013.cu | #include <vector>
#include "cuda/deform_3d_im2col_cuda.cuh"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
// #include <THC/THC.h>
// #include <THC/THCAtomics.cuh>
// #include <THC/THCDeviceUtils.cuh>
at::Tensor
deform_conv3d_cuda_forward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const int kernel_d,
const int kernel_h,
const int kernel_w,
const int stride_d,
const int stride_h,
const int stride_w,
const int pad_d,
const int pad_h,
const int pad_w,
const int dilation_d,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
// THCAssertSameGPU(THCudaTensor_checkGPU(state, 5, input, weight, bias, offset, mask));
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int depth = input.size(2);
const int height = input.size(3);
const int width = input.size(4);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_d_ = weight.size(2);
const int kernel_h_ = weight.size(3);
const int kernel_w_ = weight.size(4);
const int im2col_step_ = std::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_)
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must be divisible by group(%d)", channels, channels_out, group)
// printf("Kernels: %d %d %d %d\n", kernel_h_, kernel_w_, kernel_w, kernel_h);
// printf("Channels: %d %d\n", channels, channels_kernel);
// printf("Channels: %d %d\n", channels_out, channels_kernel);
    AT_ASSERTM(kernel_d_ == kernel_d && kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape won't match: (%d x %d x %d vs %d x %d x %d).",
               kernel_d, kernel_h, kernel_w, kernel_d_, kernel_h_, kernel_w_);
    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel * group);
const int depth_out = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
auto output = at::empty({batch * depth_out * height_out * width_out, channels_out}, input.options());
// prepare group weight and bias
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_d, kernel_h, kernel_w});
auto bias_g = bias.view({group, channels_out/group});
// define alias for easy use
const int batch_n = im2col_step_;
const int per_input_size = channels * depth * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3) * offset.size(4);
auto output_n = output.view({batch/im2col_step_, batch_n * depth_out * height_out * width_out, channels_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto columns = at::empty({channels * kernel_d * kernel_h * kernel_w, batch_n * depth_out * height_out * width_out}, input.options());
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_forward_cuda", ([&] {
deformable_3d_im2col_cuda(at::cuda::getCurrentCUDAStream(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels, depth, height, width,
depth_out, height_out, width_out,
kernel_d, kernel_h, kernel_w,
pad_d, pad_h, pad_w,
stride_d, stride_h, stride_w,
dilation_d, dilation_h, dilation_w,
deformable_group,
columns.data<scalar_t>());
}));
auto columns_g = columns.view({group, channels/group * kernel_d * kernel_h * kernel_w, batch_n * depth_out * height_out * width_out});
auto output_g = output_n.select(0, n).view({batch_n * depth_out * height_out * width_out, group, channels_out/group});
for (int g = 0; g < group; ++g)
{
auto columns_gm = columns_g.select(0, g).t();
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_d * kernel_h * kernel_w}).t();
auto output_m = at::addmm(bias_g.select(0, g), columns_gm, weight_gm);
output_g.select(1, g) = output_m.view({batch_n * depth_out * height_out * width_out, channels_out/group});
}
}
output = output.view({batch, depth_out, height_out, width_out, channels_out}).permute({0, 4, 1, 2, 3}).contiguous();
return output;
}
std::vector<at::Tensor> deform_conv3d_cuda_backward(const at::Tensor &input,
const at::Tensor &weight,
const at::Tensor &bias,
const at::Tensor &offset,
const at::Tensor &grad_output,
const int kernel_d,
const int kernel_h,
const int kernel_w,
const int stride_d,
const int stride_h,
const int stride_w,
const int pad_d,
const int pad_h,
const int pad_w,
const int dilation_d,
const int dilation_h,
const int dilation_w,
const int group,
const int deformable_group,
const int im2col_step)
{
AT_ASSERTM(input.is_contiguous(), "input tensor has to be contiguous");
AT_ASSERTM(weight.is_contiguous(), "weight tensor has to be contiguous");
AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor");
AT_ASSERTM(weight.type().is_cuda(), "weight must be a CUDA tensor");
AT_ASSERTM(bias.type().is_cuda(), "bias must be a CUDA tensor");
AT_ASSERTM(offset.type().is_cuda(), "offset must be a CUDA tensor");
const int batch = input.size(0);
const int channels = input.size(1);
const int depth = input.size(2);
const int height = input.size(3);
const int width = input.size(4);
const int channels_out = weight.size(0);
const int channels_kernel = weight.size(1);
const int kernel_d_ = weight.size(2);
const int kernel_h_ = weight.size(3);
const int kernel_w_ = weight.size(4);
const int batch_ = grad_output.size(0);
const int channels_out_ = grad_output.size(1);
const int depth_out_ = grad_output.size(2);
const int height_out_ = grad_output.size(3);
const int width_out_ = grad_output.size(4);
const int im2col_step_ = std::min(im2col_step, batch);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_)
    AT_ASSERTM((channels % group == 0) && (channels_out % group == 0),
               "channels(%d) and channels_out(%d) must be divisible by group(%d)", channels, channels_out, group)
    AT_ASSERTM(kernel_d_ == kernel_d && kernel_h_ == kernel_h && kernel_w_ == kernel_w,
               "Input shape and kernel shape won't match: (%d x %d x %d vs %d x %d x %d).", kernel_d, kernel_h, kernel_w, kernel_d_, kernel_h_, kernel_w_);
    AT_ASSERTM(channels == (channels_kernel * group),
               "Input shape and kernel channels won't match: (%d vs %d).", channels, channels_kernel * group);
const int depth_out = (depth + 2 * pad_d - (dilation_d * (kernel_d - 1) + 1)) / stride_d + 1;
const int height_out = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1;
const int width_out = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1;
    AT_ASSERTM(batch == batch_,
               "Input shape and grad_out batch won't match: (%d vs %d).", batch, batch_);
    AT_ASSERTM(channels_out == channels_out_,
               "Input shape and grad_out channels_out won't match: (%d vs %d).", channels_out, channels_out_);
    AT_ASSERTM(depth_out == depth_out_ && height_out == height_out_ && width_out == width_out_,
               "Input shape and grad_out shape won't match: (%d x %d x %d vs %d x %d x %d).", depth_out, height_out, width_out, depth_out_, height_out_, width_out_);
auto grad_input = at::zeros_like(input);
auto grad_offset = at::zeros_like(offset);
auto grad_weight = at::zeros_like(weight);
auto grad_bias = at::zeros_like(bias);
auto weight_g = weight.view({group, channels_out/group, channels_kernel, kernel_d, kernel_h, kernel_w});
auto grad_weight_g = grad_weight.view({group, channels_out/group, channels_kernel, kernel_d, kernel_h, kernel_w});
auto grad_bias_g = grad_bias.view({group, channels_out/group});
const int batch_n = im2col_step_;
const int per_input_size = channels * depth * height * width;
const int per_offset_size = offset.size(1) * offset.size(2) * offset.size(3) * offset.size(4);
auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, channels_out, depth_out, height_out, width_out});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto grad_output_g = grad_output_n.select(0, n).view({batch_n, group, channels_out/group, depth_out, height_out, width_out});
auto ones = at::ones({batch_n * depth_out * height_out * width_out}, input.options());
auto columns = at::empty({channels * kernel_d * kernel_h * kernel_w, batch_n * depth_out * height_out * width_out}, input.options());
auto columns_g = columns.view({group, channels/group * kernel_d * kernel_h * kernel_w, batch_n * depth_out * height_out * width_out});
for (int g = 0; g < group; ++g)
{
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3, 4}).contiguous().view({channels_out/group, batch_n * depth_out * height_out * width_out});
auto weight_gm = weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_d * kernel_h * kernel_w}).t();
columns_g.select(0, g) = at::mm(weight_gm, grad_output_gm);
}
AT_DISPATCH_FLOATING_TYPES(input.type(), "deform_conv_backward_cuda", ([&] {
deformable_3d_col2im_coord_cuda(at::cuda::getCurrentCUDAStream(),
columns.data<scalar_t>(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels,
depth, height, width,
depth_out, height_out, width_out,
kernel_d, kernel_h, kernel_w,
pad_d, pad_h, pad_w,
stride_d, stride_h, stride_w,
dilation_d, dilation_h, dilation_w, deformable_group,
grad_offset.data<scalar_t>() + n * im2col_step_ * per_offset_size);
// gradient w.r.t. input data
deformable_3d_col2im_cuda(at::cuda::getCurrentCUDAStream(),
columns.data<scalar_t>(),
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels,
depth, height, width,
depth_out, height_out, width_out,
kernel_d, kernel_h, kernel_w,
pad_d, pad_h, pad_w,
stride_d, stride_h, stride_w,
dilation_d, dilation_h, dilation_w, deformable_group,
grad_input.data<scalar_t>() + n * im2col_step_ * per_input_size);
// gradient w.r.t. weight, dWeight should accumulate across the batch and group
deformable_3d_im2col_cuda(at::cuda::getCurrentCUDAStream(),
input.data<scalar_t>() + n * im2col_step_ * per_input_size,
offset.data<scalar_t>() + n * im2col_step_ * per_offset_size,
batch_n, channels,
depth, height, width,
depth_out, height_out, width_out,
kernel_d, kernel_h, kernel_w,
pad_d, pad_h, pad_w,
stride_d, stride_h, stride_w,
dilation_d, dilation_h, dilation_w, deformable_group,
columns.data<scalar_t>());
}));
for (int g = 0; g < group; ++g)
{
auto grad_output_gm = grad_output_g.select(1, g).permute({1, 0, 2, 3, 4}).contiguous().view({channels_out/group, batch_n * depth_out * height_out * width_out});
auto columns_gm = columns_g.select(0, g).t();
auto grad_weight_gm = grad_weight_g.select(0, g).view({channels_out/group, channels_kernel * kernel_d * kernel_h * kernel_w});
auto grad_bias_gm = grad_bias_g.select(0, g);
grad_weight_g.select(0, g) = at::addmm(grad_weight_gm, grad_output_gm, columns_gm).view_as(grad_weight_g.select(0, g));
grad_bias_g.select(0, g) = at::addmv(grad_bias_gm, grad_output_gm, ones);
}
}
return {
grad_input, grad_offset, grad_weight, grad_bias
};
} |
5637c0ac0b7bf9f1e4235243d4de58c1e512a654.hip | // !!! This is a file automatically generated by hipify!!!
#define _SIZE_T_DEFINED
#ifndef __cplusplus
#define __cplusplus
#endif
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <math_functions.h>
#include <float.h>
#include <hip/hip_complex.h>
#include "Reduction\f_dot_f.cuh"
#include "Reduction\Reduction.cu"
#define ADD 0
#define SUB 1
#define MUL 2
#define AND 3
#define OR 4
#define OR_THRESHOLD 5
#define XOR 6
#define XNOR 7
#define IMP 8
#define PERM 9
#define INV_PERM 10
#define MODULO 11
#define DIVISION_INT 12
#define EQUAL 13
#define MAX_OPERANDS 20
#define MAX_SYMBOL_SIZE 4096
extern "C"
{
//kernel code performs no binarity checks
__global__ void CombineVectorsKernel(float** inputs, int inputsCount, float* output, int method, int count)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId >= count)
return;
float out = inputs[0][threadId];
switch (method)
{
case SUB:
for (int i = 1; i < inputsCount; i++)
out -= inputs[i][threadId];
break;
case ADD:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
break;
case AND:
case MUL:
for (int i = 1; i < inputsCount; i++)
out *= inputs[i][threadId];
break;
case OR:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
out = out >= 1;
break;
case OR_THRESHOLD:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
out = out >= (inputsCount * 0.5f);
break;
case XOR:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
out = ((int)out) % 2;
break;
case XNOR:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
out = ((int)out + 1) % 2;
break;
case PERM:
__shared__ float tmp[MAX_SYMBOL_SIZE];
tmp[threadId] = out;
__threadfence();
for (int i = 1; i < inputsCount; i++)
{
float val = tmp[__float2int_rn(inputs[i][threadId])];
__syncthreads();
tmp[threadId] = val;
__threadfence();
}
out = tmp[threadId];
break;
case INV_PERM:
__shared__ float i_tmp[MAX_SYMBOL_SIZE];
i_tmp[threadId] = out;
__threadfence();
for (int i = 1; i < inputsCount; i++)
{
int idx = __float2int_rn(inputs[i][threadId]);
float val = i_tmp[threadId];
__syncthreads();
i_tmp[idx] = val;
__threadfence();
}
out = i_tmp[threadId];
break;
case EQUAL: // Warning: uses a strict equality comparison on floats
{
bool eq = true;
for (int i = 1; eq && (i < inputsCount); i++)
{
eq = (eq && (out == inputs[i][threadId]));
}
out = eq ? 1.0f : 0.0f;
break;
}
default:
break;
}
output[threadId] = out;
}
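    // Permutation example: for symbol [a,b,c] and permutation [2,0,1],
    // PERM gathers out[i] = sym[perm[i]] -> [c,a,b], while INV_PERM
    // scatters out[perm[i]] = sym[i] -> [b,c,a], undoing the same PERM.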
__device__ __forceinline__ void CombineTwoVectorsInternal(const float& input1, const float& input2, float& output, int method)
{
switch (method)
{
case SUB:
{
output = input1 - input2;
break;
}
case ADD:
{
output = input1 + input2;
break;
}
case AND:
case MUL:
{
output = input1 * input2;
break;
}
case OR:
case OR_THRESHOLD:
{
output = (input1 + input2) >= 1;
break;
}
case XOR:
{
output = (input1 + input2) == 1;
break;
}
case XNOR:
{
output = (input1 + input2) != 1;
break;
}
case IMP:
{
output = input1 <= input2;
break;
}
case MODULO:
{
int mod = __float2int_rn(input2);
int n = __float2int_rd(input1 / mod);
output = input1 - mod * n;
break;
}
case DIVISION_INT:
{
output = __float2int_rz(input1 / input2);
break;
}
case EQUAL:
{
output = (input1 == input2) ? 1.0f : 0.0f;
break;
}
default:
break;
}
}
__global__ void CombineTwoVectorsKernel(const float* input1, const float* input2, float* output, int method, int count)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (threadId >= count)
return;
switch (method)
{
case PERM:
{
float tmp = input1[(int)input2[threadId]];
if (input1 == output)
__threadfence();
output[threadId] = tmp;
break;
}
case INV_PERM:
{
int idx = (int)input2[threadId];
if (input1 == output)
__threadfence();
output[idx] = input1[threadId];
break;
}
default:
CombineTwoVectorsInternal(input1[threadId], input2[threadId], output[threadId], method);
break;
}
}
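    // Caveat: the in-place PERM/INV_PERM paths above rely on __threadfence(),
    // which orders memory writes but does not synchronize threads or blocks;
    // when input1 aliases output they appear to assume a launch configuration
    // that avoids the read/write race -- a separate output buffer is the safe path.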
__device__ __forceinline__ void CombineTwoVectorsKernelVarSizeInternal(const float* input1, const float* input2, float* output, int method, int count1, int count2)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
switch (method)
{
case PERM:
{
if (count2 > count1)
return;
float tmp = input1[(int)input2[threadId]];
if (input1 == output)
__threadfence();
output[threadId] = tmp;
break;
}
case INV_PERM:
{
if (count2 > count1)
return;
int idx = (int)input2[threadId];
if (input1 == output)
__threadfence();
output[idx] = input1[threadId];
break;
}
default:
{
int minCount = count1 <= count2 ? count1 : count2;
if (threadId < minCount)
{
CombineTwoVectorsInternal(input1[threadId], input2[threadId], output[threadId], method);
return;
}
if (count1 > count2)
{
if (threadId < count1)
output[threadId] = input1[threadId];
}
else if (count2 > count1)
{
if (threadId < count2)
output[threadId] = method == SUB ? -input2[threadId] : input2[threadId];
}
break;
}
}
}
__global__ void CombineTwoVectorsKernelVarSize(float* input1, float* input2, float* output, int method, int count1, int count2)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (count1 > 1)
{
if (count2 > 1)
{
CombineTwoVectorsKernelVarSizeInternal(input1, input2, output, method, count1, count2);
}
else if (threadId < count1)
{
CombineTwoVectorsInternal(input1[threadId], input2[0], output[threadId], method);
}
}
else
{
if (count2 > 1 && threadId < count2)
{
CombineTwoVectorsInternal(input1[0], input2[threadId], output[threadId], method);
}
else
{
CombineTwoVectorsInternal(input1[0], input2[0], output[threadId], method);
}
}
}
__global__ void AddToIdcs(float* source, const float* idcs, float* target, int method, int idcsCount)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (threadId >= idcsCount) // Should be true: idcsCount == sourceCount
return;
float& tar = target[__float2int_rn(idcs[threadId])];
float& src = source[threadId];
switch (method)
{
case ADD:
atomicAdd(&tar, src);
break;
case SUB:
atomicAdd(&tar, -src);
break;
case OR:
tar = src;
break;
default:
break;
}
}
__global__ void MapToIdcs(float* source, float* sourceLengthSq, const float* idcs, float* target, int idcsCount)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (threadId >= idcsCount) // Should be true: idcsCount == sourceCount
return;
float& tar = target[__float2int_rn(idcs[threadId])];
float& src = source[threadId];
float len = *sourceLengthSq;
if (len < 0.0000001f)
return;
len = 1 / sqrtf(len);
// Write the normalized vector back to output
CombineTwoVectorsInternal(src, len, tar, MUL);
}
__global__ void LengthFromElements(float* element1, float* element2, float* output, int count)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId < count)
{
output[threadId] = sqrtf(element1[threadId] * element1[threadId] + element2[threadId] * element2[threadId]);
}
}
__global__ void MulComplexElementWise(cuFloatComplex* input1, cuFloatComplex* input2, cuFloatComplex* output, int count)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId < count)
{
cuFloatComplex i1 = input1[threadId];
cuFloatComplex i2 = input2[threadId];
output[threadId] = cuCmulf(i1, i2);
}
}
__global__ void InvolveVector(float* input, float* output, int inputSize)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId < inputSize - 1)
{
output[0] = input[0];
output[threadId + 1] = input[inputSize - threadId - 1];
}
}
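    // Involution example: input [a,b,c,d] -> output [a,d,c,b] (index j maps to
    // (-j) mod n), commonly used as the approximate inverse for
    // circular-convolution binding in holographic (HRR) representations.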
__global__ void Interpolate(float* input1, float* input2, float* output, float weight, int inputSize)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId < inputSize)
{
if (weight <= 0)
{
output[threadId] = input1[threadId];
}
else if (weight >= 1)
{
output[threadId] = input2[threadId];
}
else
{
output[threadId] = (1 - weight) * input1[threadId] + weight * input2[threadId];
}
}
}
__global__ void InterpolateFromMemBlock(float* input1, float* input2, float* output, float* weightMemBlock, int inputSize)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId < inputSize)
{
if (weightMemBlock[0] <= 0)
{
output[threadId] = input1[threadId];
}
else if (weightMemBlock[0] >= 1)
{
output[threadId] = input2[threadId];
}
else
{
output[threadId] = (1 - weightMemBlock[0]) * input1[threadId] + weightMemBlock[0] * input2[threadId];
}
}
}
// naive mat. multiplication
    // TODO: rewrite it with __syncthreads... :) Check out the nvidia dev-blog or TestFeat/HMath.cu for how it could look...
__global__ void MatMultipl_naive (float * A, float * B, float * C , int nColsA , int nColsB , int sizeC ) {
int i_col = blockIdx.x * blockDim.x + threadIdx.x; /// index in row
int i_row = blockIdx.y * blockDim.y + threadIdx.y; /// index in column
int idx = i_row * nColsB + i_col; // # of cols in B = # of cols in C
float Cvalue = 0;
if (idx < sizeC){
for (int e=0; e < nColsA; e++)
Cvalue += A[i_row * nColsA + e] * B[e * nColsB + i_col];
C[idx] = Cvalue;
}
}
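    // A minimal tiled shared-memory sketch of the rewrite the TODO above refers
    // to (not part of the original file; TILE and the extra nRowsC parameter
    // are assumptions for illustration, with the same row-major layout):
    #define TILE 16
    __global__ void MatMultipl_tiled (const float * A, const float * B, float * C, int nRowsC, int nColsA, int nColsB) {
        __shared__ float As[TILE][TILE];
        __shared__ float Bs[TILE][TILE];
        int row = blockIdx.y * TILE + threadIdx.y;
        int col = blockIdx.x * TILE + threadIdx.x;
        float Cvalue = 0;
        for (int t = 0; t < (nColsA + TILE - 1) / TILE; t++) {
            int aCol = t * TILE + threadIdx.x;
            int bRow = t * TILE + threadIdx.y;
            // stage one tile of A and one tile of B, zero-padding past the edges
            As[threadIdx.y][threadIdx.x] = (row < nRowsC && aCol < nColsA) ? A[row * nColsA + aCol] : 0.0f;
            Bs[threadIdx.y][threadIdx.x] = (bRow < nColsA && col < nColsB) ? B[bRow * nColsB + col] : 0.0f;
            __syncthreads();
            for (int e = 0; e < TILE; e++)
                Cvalue += As[threadIdx.y][e] * Bs[e][threadIdx.x];
            __syncthreads();
        }
        if (row < nRowsC && col < nColsB)
            C[row * nColsB + col] = Cvalue;
    }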
}
| 5637c0ac0b7bf9f1e4235243d4de58c1e512a654.cu | #define _SIZE_T_DEFINED
#ifndef __cplusplus
#define __cplusplus
#endif
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include <builtin_types.h>
#include <vector_functions.h>
#include <math_functions.h>
#include <float.h>
#include <cuComplex.h>
#include "Reduction\f_dot_f.cuh"
#include "Reduction\Reduction.cu"
#define ADD 0
#define SUB 1
#define MUL 2
#define AND 3
#define OR 4
#define OR_THRESHOLD 5
#define XOR 6
#define XNOR 7
#define IMP 8
#define PERM 9
#define INV_PERM 10
#define MODULO 11
#define DIVISION_INT 12
#define EQUAL 13
#define MAX_OPERANDS 20
#define MAX_SYMBOL_SIZE 4096
extern "C"
{
//kernel code performs no binarity checks
__global__ void CombineVectorsKernel(float** inputs, int inputsCount, float* output, int method, int count)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if(threadId >= count)
return;
float out = inputs[0][threadId];
switch (method)
{
case SUB:
for (int i = 1; i < inputsCount; i++)
out -= inputs[i][threadId];
break;
case ADD:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
break;
case AND:
case MUL:
for (int i = 1; i < inputsCount; i++)
out *= inputs[i][threadId];
break;
case OR:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
out = out >= 1;
break;
case OR_THRESHOLD:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
out = out >= (inputsCount * 0.5f);
break;
case XOR:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
out = ((int)out) % 2;
break;
case XNOR:
for (int i = 1; i < inputsCount; i++)
out += inputs[i][threadId];
out = ((int)out + 1) % 2;
break;
case PERM:
__shared__ float tmp[MAX_SYMBOL_SIZE];
tmp[threadId] = out;
__threadfence();
for (int i = 1; i < inputsCount; i++)
{
float val = tmp[__float2int_rn(inputs[i][threadId])];
__syncthreads();
tmp[threadId] = val;
__threadfence();
}
out = tmp[threadId];
break;
case INV_PERM:
__shared__ float i_tmp[MAX_SYMBOL_SIZE];
i_tmp[threadId] = out;
__threadfence();
for (int i = 1; i < inputsCount; i++)
{
int idx = __float2int_rn(inputs[i][threadId]);
float val = i_tmp[threadId];
__syncthreads();
i_tmp[idx] = val;
__threadfence();
}
out = i_tmp[threadId];
break;
case EQUAL: // Warning: uses a strict equality comparison on floats
{
bool eq = true;
for (int i = 1; eq && (i < inputsCount); i++)
{
eq = (eq && (out == inputs[i][threadId]));
}
out = eq ? 1.0f : 0.0f;
break;
}
default:
break;
}
output[threadId] = out;
}
__device__ __forceinline__ void CombineTwoVectorsInternal(const float& input1, const float& input2, float& output, int method)
{
switch (method)
{
case SUB:
{
output = input1 - input2;
break;
}
case ADD:
{
output = input1 + input2;
break;
}
case AND:
case MUL:
{
output = input1 * input2;
break;
}
case OR:
case OR_THRESHOLD:
{
output = (input1 + input2) >= 1;
break;
}
case XOR:
{
output = (input1 + input2) == 1;
break;
}
case XNOR:
{
output = (input1 + input2) != 1;
break;
}
case IMP:
{
output = input1 <= input2;
break;
}
case MODULO:
{
int mod = __float2int_rn(input2);
int n = __float2int_rd(input1 / mod);
output = input1 - mod * n;
break;
}
case DIVISION_INT:
{
output = __float2int_rz(input1 / input2);
break;
}
case EQUAL:
{
output = (input1 == input2) ? 1.0f : 0.0f;
break;
}
default:
break;
}
}
__global__ void CombineTwoVectorsKernel(const float* input1, const float* input2, float* output, int method, int count)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (threadId >= count)
return;
switch (method)
{
case PERM:
{
float tmp = input1[(int)input2[threadId]];
if (input1 == output)
__threadfence();
output[threadId] = tmp;
break;
}
case INV_PERM:
{
int idx = (int)input2[threadId];
if (input1 == output)
__threadfence();
output[idx] = input1[threadId];
break;
}
default:
CombineTwoVectorsInternal(input1[threadId], input2[threadId], output[threadId], method);
break;
}
}
__device__ __forceinline__ void CombineTwoVectorsKernelVarSizeInternal(const float* input1, const float* input2, float* output, int method, int count1, int count2)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
switch (method)
{
case PERM:
{
if (count2 > count1)
return;
float tmp = input1[(int)input2[threadId]];
if (input1 == output)
__threadfence();
output[threadId] = tmp;
break;
}
case INV_PERM:
{
if (count2 > count1)
return;
int idx = (int)input2[threadId];
if (input1 == output)
__threadfence();
output[idx] = input1[threadId];
break;
}
default:
{
int minCount = count1 <= count2 ? count1 : count2;
if (threadId < minCount)
{
CombineTwoVectorsInternal(input1[threadId], input2[threadId], output[threadId], method);
return;
}
if (count1 > count2)
{
if (threadId < count1)
output[threadId] = input1[threadId];
}
else if (count2 > count1)
{
if (threadId < count2)
output[threadId] = method == SUB ? -input2[threadId] : input2[threadId];
}
break;
}
}
}
__global__ void CombineTwoVectorsKernelVarSize(float* input1, float* input2, float* output, int method, int count1, int count2)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (count1 > 1)
{
if (count2 > 1)
{
CombineTwoVectorsKernelVarSizeInternal(input1, input2, output, method, count1, count2);
}
else if (threadId < count1)
{
CombineTwoVectorsInternal(input1[threadId], input2[0], output[threadId], method);
}
}
else
{
if (count2 > 1 && threadId < count2)
{
CombineTwoVectorsInternal(input1[0], input2[threadId], output[threadId], method);
}
else
{
CombineTwoVectorsInternal(input1[0], input2[0], output[threadId], method);
}
}
}
__global__ void AddToIdcs(float* source, const float* idcs, float* target, int method, int idcsCount)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (threadId >= idcsCount) // Should be true: idcsCount == sourceCount
return;
float& tar = target[__float2int_rn(idcs[threadId])];
float& src = source[threadId];
switch (method)
{
case ADD:
atomicAdd(&tar, src);
break;
case SUB:
atomicAdd(&tar, -src);
break;
case OR:
tar = src;
break;
default:
break;
}
}
__global__ void MapToIdcs(float* source, float* sourceLengthSq, const float* idcs, float* target, int idcsCount)
{
        int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceding current row in grid
            + blockDim.x*blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (threadId >= idcsCount) // Should be true: idcsCount == sourceCount
return;
float& tar = target[__float2int_rn(idcs[threadId])];
float& src = source[threadId];
float len = *sourceLengthSq;
if (len < 0.0000001f)
return;
len = 1 / sqrtf(len);
// Write the normalized vector back to output
CombineTwoVectorsInternal(src, len, tar, MUL);
}
__global__ void LengthFromElements(float* element1, float* element2, float* output, int count)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //threads in the grid rows preceding this one
+ blockDim.x*blockIdx.x //threads in the preceding blocks of the current row
+ threadIdx.x;
if(threadId < count)
{
output[threadId] = sqrtf(element1[threadId] * element1[threadId] + element2[threadId] * element2[threadId]);
}
}
__global__ void MulComplexElementWise(cuFloatComplex* input1, cuFloatComplex* input2, cuFloatComplex* output, int count)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //threads in the grid rows preceding this one
+ blockDim.x*blockIdx.x //threads in the preceding blocks of the current row
+ threadIdx.x;
if(threadId < count)
{
cuFloatComplex i1 = input1[threadId];
cuFloatComplex i2 = input2[threadId];
output[threadId] = cuCmulf(i1, i2);
}
}
__global__ void InvolveVector(float* input, float* output, int inputSize)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //threads in the grid rows preceding this one
+ blockDim.x*blockIdx.x //threads in the preceding blocks of the current row
+ threadIdx.x;
if(threadId < inputSize - 1)
{
if (threadId == 0)
output[0] = input[0]; // copy the first element once rather than from every thread
output[threadId + 1] = input[inputSize - threadId - 1];
}
}
__global__ void Interpolate(float* input1, float* input2, float* output, float weight, int inputSize)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //threads in the grid rows preceding this one
+ blockDim.x*blockIdx.x //threads in the preceding blocks of the current row
+ threadIdx.x;
if(threadId < inputSize)
{
if (weight <= 0)
{
output[threadId] = input1[threadId];
}
else if (weight >= 1)
{
output[threadId] = input2[threadId];
}
else
{
output[threadId] = (1 - weight) * input1[threadId] + weight * input2[threadId];
}
}
}
__global__ void InterpolateFromMemBlock(float* input1, float* input2, float* output, float* weightMemBlock, int inputSize)
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //threads in the grid rows preceding this one
+ blockDim.x*blockIdx.x //threads in the preceding blocks of the current row
+ threadIdx.x;
if(threadId < inputSize)
{
if (weightMemBlock[0] <= 0)
{
output[threadId] = input1[threadId];
}
else if (weightMemBlock[0] >= 1)
{
output[threadId] = input2[threadId];
}
else
{
output[threadId] = (1 - weightMemBlock[0]) * input1[threadId] + weightMemBlock[0] * input2[threadId];
}
}
}
// naive matrix multiplication
// TODO: rewrite it with __syncthreads() and shared-memory tiling... :) Check out the NVIDIA dev blog or TestFeat/HMath.cu for how it should look.
__global__ void MatMultipl_naive (float * A, float * B, float * C , int nColsA , int nColsB , int sizeC ) {
int i_col = blockIdx.x * blockDim.x + threadIdx.x; /// index in row
int i_row = blockIdx.y * blockDim.y + threadIdx.y; /// index in column
int idx = i_row * nColsB + i_col; // # of cols in B = # of cols in C
float Cvalue = 0;
if (i_col < nColsB && idx < sizeC){ // guard the column index too, so threads past a row's end cannot alias other elements of C
for (int e=0; e < nColsA; e++)
Cvalue += A[i_row * nColsA + e] * B[e * nColsB + i_col];
C[idx] = Cvalue;
}
}
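// Minimal host-side usage sketch (an illustrative assumption, not part of the
// original file): one thread per element of the nRowsA x nColsB result C.
// The pointer and dimension names are hypothetical.
static void MatMultipl_naive_launch_sketch(float* dA, float* dB, float* dC,
int nRowsA, int nColsA, int nColsB)
{
dim3 block(16, 16);
dim3 grid((nColsB + block.x - 1) / block.x, (nRowsA + block.y - 1) / block.y);
MatMultipl_naive<<<grid, block>>>(dA, dB, dC, nColsA, nColsB, nRowsA * nColsB);
}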
}
|
8b7aba5a724b441827ab35a623be88657a04ba0a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Gauge>
struct GaugePlaqArg : public ReduceArg<double2> {
int threads; // number of active threads required
int E[4]; // extended grid dimensions
int X[4]; // true grid dimensions
int border[4];
Gauge dataOr;
GaugePlaqArg(const Gauge &dataOr, const GaugeField &data)
: ReduceArg<double2>(), dataOr(dataOr)
{
int R = 0;
for (int dir=0; dir<4; ++dir){
border[dir] = data.R()[dir];
E[dir] = data.X()[dir];
X[dir] = data.X()[dir] - border[dir]*2;
R += border[dir];
}
threads = X[0]*X[1]*X[2]*X[3]/2;
}
};
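// Note: E[] holds the extended (halo-padded) local dimensions while X[] is the
// interior volume; `threads` covers a single checkerboard parity, hence the /2.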
template<int blockSize, typename Float, typename Gauge>
__global__ void computePlaq(GaugePlaqArg<Gauge> arg){
typedef Matrix<complex<Float>,3> Link;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y;
double2 plaq = make_double2(0.0,0.0);
if(idx < arg.threads) {
int x[4];
getCoords(x, idx, arg.X, parity);
for (int dr=0; dr<4; ++dr) x[dr] += arg.border[dr]; // extended grid coordinates
int dx[4] = {0, 0, 0, 0};
for (int mu = 0; mu < 3; mu++) {
for (int nu = (mu+1); nu < 3; nu++) {
Link U1 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), parity);
dx[mu]++;
Link U2 = arg.dataOr(nu, linkIndexShift(x,dx,arg.E), 1-parity);
dx[mu]--;
dx[nu]++;
Link U3 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), 1-parity);
dx[nu]--;
Link U4 = arg.dataOr(nu, linkIndexShift(x,dx,arg.E), parity);
plaq.x += getTrace( U1 * U2 * conj(U3) * conj(U4) ).x;
}
Link U1 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), parity);
dx[mu]++;
Link U2 = arg.dataOr(3, linkIndexShift(x,dx,arg.E), 1-parity);
dx[mu]--;
dx[3]++;
Link U3 = arg.dataOr(mu,linkIndexShift(x,dx,arg.E), 1-parity);
dx[3]--;
Link U4 = arg.dataOr(3, linkIndexShift(x,dx,arg.E), parity);
plaq.y += getTrace( U1 * U2 * conj(U3) * conj(U4) ).x;
}
}
// perform final inter-block reduction and write out result
reduce2d<blockSize,2>(arg, plaq);
}
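// The loops above accumulate Re tr( U_mu(x) U_nu(x+mu) U_mu(x+nu)^dag U_nu(x)^dag ):
// spatial plaquettes (mu < nu < 3) go into plaq.x and temporal ones (nu = 3) into
// plaq.y, before the block-level reduction.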
template<typename Float, typename Gauge>
class GaugePlaq : TunableLocalParity {
GaugePlaqArg<Gauge> arg;
const QudaFieldLocation location;
private:
unsigned int minThreads() const { return arg.threads; }
public:
GaugePlaq(GaugePlaqArg<Gauge> &arg, QudaFieldLocation location)
: arg(arg), location(location) {}
~GaugePlaq () { }
void apply(const hipStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
arg.result_h[0] = make_double2(0.,0.);
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_LOCAL_PARITY(computePlaq, tp, stream, arg, Float, Gauge);
qudaDeviceSynchronize();
} else {
errorQuda("CPU not supported yet\n");
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
long long flops() const { return 6ll*2*arg.threads*(3*198+3); }
long long bytes() const { return 6ll*4*2*arg.threads*arg.dataOr.Bytes(); }
};
template<typename Float, typename Gauge>
void plaquette(const Gauge dataOr, const GaugeField& data, double2 &plq, QudaFieldLocation location) {
GaugePlaqArg<Gauge> arg(dataOr, data);
GaugePlaq<Float,Gauge> gaugePlaq(arg, location);
gaugePlaq.apply(0);
comm_allreduce_array((double*) arg.result_h, 2);
arg.result_h[0].x /= 9.*(2*arg.threads*comm_size());
arg.result_h[0].y /= 9.*(2*arg.threads*comm_size());
plq.x = arg.result_h[0].x;
plq.y = arg.result_h[0].y;
}
template<typename Float>
void plaquette(const GaugeField& data, double2 &plq, QudaFieldLocation location) {
INSTANTIATE_RECONSTRUCT(plaquette<Float>, data, plq, location);
}
#endif
double3 plaquette(const GaugeField& data, QudaFieldLocation location) {
#ifdef GPU_GAUGE_TOOLS
double2 plq;
INSTANTIATE_PRECISION(plaquette, data, plq, location);
double3 plaq = make_double3(0.5*(plq.x + plq.y), plq.x, plq.y);
#else
errorQuda("Gauge tools are not build");
double3 plaq = make_double3(0., 0., 0.);
#endif
return plaq;
}
} // namespace quda
| 8b7aba5a724b441827ab35a623be88657a04ba0a.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
namespace quda {
#ifdef GPU_GAUGE_TOOLS
template <typename Gauge>
struct GaugePlaqArg : public ReduceArg<double2> {
int threads; // number of active threads required
int E[4]; // extended grid dimensions
int X[4]; // true grid dimensions
int border[4];
Gauge dataOr;
GaugePlaqArg(const Gauge &dataOr, const GaugeField &data)
: ReduceArg<double2>(), dataOr(dataOr)
{
int R = 0;
for (int dir=0; dir<4; ++dir){
border[dir] = data.R()[dir];
E[dir] = data.X()[dir];
X[dir] = data.X()[dir] - border[dir]*2;
R += border[dir];
}
threads = X[0]*X[1]*X[2]*X[3]/2;
}
};
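// Note: E[] holds the extended (halo-padded) local dimensions while X[] is the
// interior volume; `threads` covers a single checkerboard parity, hence the /2.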
template<int blockSize, typename Float, typename Gauge>
__global__ void computePlaq(GaugePlaqArg<Gauge> arg){
typedef Matrix<complex<Float>,3> Link;
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int parity = threadIdx.y;
double2 plaq = make_double2(0.0,0.0);
if(idx < arg.threads) {
int x[4];
getCoords(x, idx, arg.X, parity);
for (int dr=0; dr<4; ++dr) x[dr] += arg.border[dr]; // extended grid coordinates
int dx[4] = {0, 0, 0, 0};
for (int mu = 0; mu < 3; mu++) {
for (int nu = (mu+1); nu < 3; nu++) {
Link U1 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), parity);
dx[mu]++;
Link U2 = arg.dataOr(nu, linkIndexShift(x,dx,arg.E), 1-parity);
dx[mu]--;
dx[nu]++;
Link U3 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), 1-parity);
dx[nu]--;
Link U4 = arg.dataOr(nu, linkIndexShift(x,dx,arg.E), parity);
plaq.x += getTrace( U1 * U2 * conj(U3) * conj(U4) ).x;
}
Link U1 = arg.dataOr(mu, linkIndexShift(x,dx,arg.E), parity);
dx[mu]++;
Link U2 = arg.dataOr(3, linkIndexShift(x,dx,arg.E), 1-parity);
dx[mu]--;
dx[3]++;
Link U3 = arg.dataOr(mu,linkIndexShift(x,dx,arg.E), 1-parity);
dx[3]--;
Link U4 = arg.dataOr(3, linkIndexShift(x,dx,arg.E), parity);
plaq.y += getTrace( U1 * U2 * conj(U3) * conj(U4) ).x;
}
}
// perform final inter-block reduction and write out result
reduce2d<blockSize,2>(arg, plaq);
}
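// The loops above accumulate Re tr( U_mu(x) U_nu(x+mu) U_mu(x+nu)^dag U_nu(x)^dag ):
// spatial plaquettes (mu < nu < 3) go into plaq.x and temporal ones (nu = 3) into
// plaq.y, before the block-level reduction.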
template<typename Float, typename Gauge>
class GaugePlaq : TunableLocalParity {
GaugePlaqArg<Gauge> arg;
const QudaFieldLocation location;
private:
unsigned int minThreads() const { return arg.threads; }
public:
GaugePlaq(GaugePlaqArg<Gauge> &arg, QudaFieldLocation location)
: arg(arg), location(location) {}
~GaugePlaq () { }
void apply(const cudaStream_t &stream){
if(location == QUDA_CUDA_FIELD_LOCATION){
arg.result_h[0] = make_double2(0.,0.);
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
LAUNCH_KERNEL_LOCAL_PARITY(computePlaq, tp, stream, arg, Float, Gauge);
qudaDeviceSynchronize();
} else {
errorQuda("CPU not supported yet\n");
}
}
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
aux << "threads=" << arg.threads << ",prec=" << sizeof(Float);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux.str().c_str());
}
long long flops() const { return 6ll*2*arg.threads*(3*198+3); }
long long bytes() const { return 6ll*4*2*arg.threads*arg.dataOr.Bytes(); }
};
template<typename Float, typename Gauge>
void plaquette(const Gauge dataOr, const GaugeField& data, double2 &plq, QudaFieldLocation location) {
GaugePlaqArg<Gauge> arg(dataOr, data);
GaugePlaq<Float,Gauge> gaugePlaq(arg, location);
gaugePlaq.apply(0);
comm_allreduce_array((double*) arg.result_h, 2);
arg.result_h[0].x /= 9.*(2*arg.threads*comm_size());
arg.result_h[0].y /= 9.*(2*arg.threads*comm_size());
plq.x = arg.result_h[0].x;
plq.y = arg.result_h[0].y;
}
template<typename Float>
void plaquette(const GaugeField& data, double2 &plq, QudaFieldLocation location) {
INSTANTIATE_RECONSTRUCT(plaquette<Float>, data, plq, location);
}
#endif
double3 plaquette(const GaugeField& data, QudaFieldLocation location) {
#ifdef GPU_GAUGE_TOOLS
double2 plq;
INSTANTIATE_PRECISION(plaquette, data, plq, location);
double3 plaq = make_double3(0.5*(plq.x + plq.y), plq.x, plq.y);
#else
errorQuda("Gauge tools are not build");
double3 plaq = make_double3(0., 0., 0.);
#endif
return plaq;
}
} // namespace quda
|
a207335d00940f468df7d295c004b215d9976aa3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parameters.cuh"
__global__ void imemset(int *x, int n) {
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
if(i <= n)
x[i] = 0;
}
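// Note: arrays in this code base appear to be addressed 1..n (index 0 unused) --
// see the `vel_colloid + 1` ranges in main() -- hence the +1 offset and the
// inclusive bound above.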
int main(int argc, char *argv[]) {
clock_t begin = clock();
no_of_fluid = len.prod()*atoi(argv[1]), no_of_colloid = atoi(argv[2]);
point mom;
double ke_fluid, energy_colloid;
initialize();
initialize_rand();
initialize_colloid();
initialize_fluid();
create_box();
neighbour_list_mpcd();
neighbour_list_md();
compute_force_md();
tumble();
printf("After Tumble\n");
for(nn = 1; nn <= niter; nn++) {
if(!(nn%1000)) printf("%12d\n", nn);
rotation_mpcd();
run();
for(int l = 1; l <= n; l++) {
update_pos_md();
neighbour_list_md();
update_pos_mpcd();
neighbour_list_mpcd();
if(!(l%10) && nn > 10000) updown_velocity();
fluid_colloid_collision();
update_activity_direction();
compute_force_md();
update_velocity_colloid();
}
hipDeviceSynchronize();
energy_colloid = *potential_colloid;
energy_colloid += 0.5*mass_colloid*thrust::transform_reduce(thrust::device, vel_colloid + 1, vel_colloid + no_of_colloid + 1, mod_value(), (double)0, add_double());
energy_colloid += 0.5*I_colloid*thrust::transform_reduce(thrust::device, ang_vel_colloid + 1, ang_vel_colloid + no_of_colloid + 1, mod_value(), (double)0, add_double());
mom = thrust::reduce(thrust::device, vel_colloid + 1, vel_colloid + no_of_colloid + 1, point(0, 0, 0), add_point())*mass_colloid;
mom += thrust::reduce(thrust::device, vel_fl + 1, vel_fl + no_of_fluid + 1, point(0, 0, 0), add_point())*mass_fl;
ke_fluid = 0.5*mass_fl*thrust::transform_reduce(thrust::device, vel_fl + 1, vel_fl + no_of_fluid + 1, mod_value(), (double)0, add_double());
}
clock_t end = clock();
printf("%lf\n", (double)(end - begin)/CLOCKS_PER_SEC);
return 0;
}
| a207335d00940f468df7d295c004b215d9976aa3.cu | #include "parameters.cuh"
__global__ void imemset(int *x, int n) {
int i = blockIdx.x*blockDim.x + threadIdx.x + 1;
if(i <= n)
x[i] = 0;
}
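// Note: arrays in this code base appear to be addressed 1..n (index 0 unused) --
// see the `vel_colloid + 1` ranges in main() -- hence the +1 offset and the
// inclusive bound above.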
int main(int argc, char *argv[]) {
clock_t begin = clock();
no_of_fluid = len.prod()*atoi(argv[1]), no_of_colloid = atoi(argv[2]);
point mom;
double ke_fluid, energy_colloid;
initialize();
initialize_rand();
initialize_colloid();
initialize_fluid();
create_box();
neighbour_list_mpcd();
neighbour_list_md();
compute_force_md();
tumble();
printf("After Tumble\n");
for(nn = 1; nn <= niter; nn++) {
if(!(nn%1000)) printf("%12d\n", nn);
rotation_mpcd();
run();
for(int l = 1; l <= n; l++) {
update_pos_md();
neighbour_list_md();
update_pos_mpcd();
neighbour_list_mpcd();
if(!(l%10) && nn > 10000) updown_velocity();
fluid_colloid_collision();
update_activity_direction();
compute_force_md();
update_velocity_colloid();
}
cudaDeviceSynchronize();
energy_colloid = *potential_colloid;
energy_colloid += 0.5*mass_colloid*thrust::transform_reduce(thrust::device, vel_colloid + 1, vel_colloid + no_of_colloid + 1, mod_value(), (double)0, add_double());
energy_colloid += 0.5*I_colloid*thrust::transform_reduce(thrust::device, ang_vel_colloid + 1, ang_vel_colloid + no_of_colloid + 1, mod_value(), (double)0, add_double());
mom = thrust::reduce(thrust::device, vel_colloid + 1, vel_colloid + no_of_colloid + 1, point(0, 0, 0), add_point())*mass_colloid;
mom += thrust::reduce(thrust::device, vel_fl + 1, vel_fl + no_of_fluid + 1, point(0, 0, 0), add_point())*mass_fl;
ke_fluid = 0.5*mass_fl*thrust::transform_reduce(thrust::device, vel_fl + 1, vel_fl + no_of_fluid + 1, mod_value(), (double)0, add_double());
}
clock_t end = clock();
printf("%lf\n", (double)(end - begin)/CLOCKS_PER_SEC);
return 0;
}
|
a2eebceef5af5ab64552a006690178415ef2135d.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <hip/hip_runtime_api.h>
#define BASE_TYPE float
__global__ void map(const BASE_TYPE *points, BASE_TYPE *result, const BASE_TYPE h)
{
extern __shared__ BASE_TYPE s[];
int index = blockDim.x * blockIdx.x + threadIdx.x;
s[threadIdx.x] = points[index] * h;
__syncthreads();
if (threadIdx.x == 0)
{
for (int i = 1; i < blockDim.x; i++)
s[0] += s[i];
result[blockIdx.x] = s[0];
}
}
BASE_TYPE reduce(const BASE_TYPE *dev_map, const int map_count)
{
BASE_TYPE *host_map = new BASE_TYPE[map_count];
BASE_TYPE result = 0;
hipMemcpy(host_map, dev_map, map_count * sizeof(BASE_TYPE), hipMemcpyDeviceToHost);
for (int i = 0; i < map_count; i++)
result += host_map[i];
delete[] host_map; // the original leaked this temporary buffer
return result;
}
BASE_TYPE func(BASE_TYPE x)
{
return x;
}
BASE_TYPE* points(const BASE_TYPE a, const BASE_TYPE b, const int N)
{
BASE_TYPE *p = new BASE_TYPE[N];
BASE_TYPE h = (b - a) / N;
for (int i = 0; i < N; i++)
{
p[i] = func(a + (i + 0.5) * h);
}
return p;
}
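// points() samples f at the midpoint of each of the N subintervals of [a, b];
// combined with the per-sample scaling by h inside map(), this implements the
// composite midpoint rule for the integral of f over [a, b]. For f(x) = x on
// [0, 5] the rule is exact, so the program should print 12.50.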
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
hipError_t err;
err = hipMalloc((void **)dev, size);
if (err != hipSuccess)
throw err;
if (host != NULL)
{
err = hipMemcpy(*dev, host, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
throw err;
}
}
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int threads_per_block, const int N)
{
*grid = dim3(N / threads_per_block);
*block = dim3(threads_per_block);
printf("Block (%d, %d, %d)\n", block->x, block->y, block->z);
printf("Grid (%d, %d, %d)\n", grid->x, grid->y, grid->z);
}
int main()
{
const int N = 20;
const int threads_per_block = 5;
const int block_count = N / threads_per_block;
const size_t in_size = N * sizeof(BASE_TYPE);
const size_t out_size = block_count * sizeof(BASE_TYPE);
BASE_TYPE a = 0, b = 5;
dim3 grid, block;
cuda_init_grid_and_block(&grid, &block, threads_per_block, N); // the original passed &blockDim/&gridDim swapped relative to the helper's (grid, block) parameter order
BASE_TYPE *host_a = points(a, b, N), result;
BASE_TYPE *dev_a, *dev_result;
try
{
cuda_init_array(&dev_a, host_a, in_size);
cuda_init_array(&dev_result, NULL, out_size);
}
catch (hipError_t err)
{
fprintf(stderr, "Failed to allocate device (error code: %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipLaunchKernelGGL(map, grid, block, threads_per_block * sizeof(BASE_TYPE), 0, dev_a, dev_result, (b - a) / N);
result = reduce(dev_result, block_count);
printf("%3.2f\n", result);
hipFree(dev_a);
hipFree(dev_result);
delete[] host_a;
return 0;
} | a2eebceef5af5ab64552a006690178415ef2135d.cu | #include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime_api.h>
#define BASE_TYPE float
__global__ void map(const BASE_TYPE *points, BASE_TYPE *result, const BASE_TYPE h)
{
extern __shared__ BASE_TYPE s[];
int index = blockDim.x * blockIdx.x + threadIdx.x;
s[threadIdx.x] = points[index] * h;
__syncthreads();
if (threadIdx.x == 0)
{
for (int i = 1; i < blockDim.x; i++)
s[0] += s[i];
result[blockIdx.x] = s[0];
}
}
BASE_TYPE reduce(const BASE_TYPE *dev_map, const int map_count)
{
BASE_TYPE *host_map = new BASE_TYPE[map_count];
BASE_TYPE result = 0;
cudaMemcpy(host_map, dev_map, map_count * sizeof(BASE_TYPE), cudaMemcpyDeviceToHost);
for (int i = 0; i < map_count; i++)
result += host_map[i];
delete[] host_map; // the original leaked this temporary buffer
return result;
}
BASE_TYPE func(BASE_TYPE x)
{
return x;
}
BASE_TYPE* points(const BASE_TYPE a, const BASE_TYPE b, const int N)
{
BASE_TYPE *p = new BASE_TYPE[N];
BASE_TYPE h = (b - a) / N;
for (int i = 0; i < N; i++)
{
p[i] = func(a + (i + 0.5) * h);
}
return p;
}
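// points() samples f at the midpoint of each of the N subintervals of [a, b];
// combined with the per-sample scaling by h inside map(), this implements the
// composite midpoint rule for the integral of f over [a, b]. For f(x) = x on
// [0, 5] the rule is exact, so the program should print 12.50.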
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
cudaError_t err;
err = cudaMalloc((void **)dev, size);
if (err != cudaSuccess)
throw err;
if (host != NULL)
{
err = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
throw err;
}
}
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int threads_per_block, const int N)
{
*grid = dim3(N / threads_per_block);
*block = dim3(threads_per_block);
printf("Block (%d, %d, %d)\n", block->x, block->y, block->z);
printf("Grid (%d, %d, %d)\n", grid->x, grid->y, grid->z);
}
int main()
{
const int N = 20;
const int threads_per_block = 5;
const int block_count = N / threads_per_block;
const size_t in_size = N * sizeof(BASE_TYPE);
const size_t out_size = block_count * sizeof(BASE_TYPE);
BASE_TYPE a = 0, b = 5;
dim3 grid, block;
cuda_init_grid_and_block(&grid, &block, threads_per_block, N); // the original passed &blockDim/&gridDim swapped relative to the helper's (grid, block) parameter order
BASE_TYPE *host_a = points(a, b, N), result;
BASE_TYPE *dev_a, *dev_result;
try
{
cuda_init_array(&dev_a, host_a, in_size);
cuda_init_array(&dev_result, NULL, out_size);
}
catch (cudaError_t err)
{
fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
map<<<grid, block, threads_per_block * sizeof(BASE_TYPE)>>>(dev_a, dev_result, (b - a) / N);
result = reduce(dev_result, block_count);
printf("%3.2f\n", result);
cudaFree(dev_a);
cudaFree(dev_result);
delete[] host_a;
return 0;
} |
d29dadfb0c6266d01f79ed646b6037828820db17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
if (comp >= var_3 - tanhf((var_4 * var_5 + var_6))) {
for (int i=0; i < var_1; ++i) {
comp = var_7 / fabsf((+1.5544E6f + -1.2034E35f * var_8));
if (comp >= acosf(powf((+1.8476E-36f * (+0.0f * (-1.8124E-41f / +1.1954E-36f))), +0.0f * (var_9 - -1.7000E-36f / +1.1636E-41f)))) {
comp = -1.7824E-43f + var_10;
float tmp_1 = +1.8058E-26f;
comp = tmp_1 * (var_11 * asinf(+1.5915E-35f * +1.7185E-44f - var_12));
comp += -1.6238E11f / powf(cosf((-1.9095E35f - var_13)), -1.5804E-36f / var_14 - (var_15 - var_16 - var_17));
}
for (int i=0; i < var_2; ++i) {
comp = (var_18 / var_19);
comp = -1.0345E-44f * (var_20 / (var_21 - var_22 / var_23 / -1.6902E-37f));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
hipDeviceSynchronize();
return 0;
}
| d29dadfb0c6266d01f79ed646b6037828820db17.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
if (comp >= var_3 - tanhf((var_4 * var_5 + var_6))) {
for (int i=0; i < var_1; ++i) {
comp = var_7 / fabsf((+1.5544E6f + -1.2034E35f * var_8));
if (comp >= acosf(powf((+1.8476E-36f * (+0.0f * (-1.8124E-41f / +1.1954E-36f))), +0.0f * (var_9 - -1.7000E-36f / +1.1636E-41f)))) {
comp = -1.7824E-43f + var_10;
float tmp_1 = +1.8058E-26f;
comp = tmp_1 * (var_11 * asinf(+1.5915E-35f * +1.7185E-44f - var_12));
comp += -1.6238E11f / powf(cosf((-1.9095E35f - var_13)), -1.5804E-36f / var_14 - (var_15 - var_16 - var_17));
}
for (int i=0; i < var_2; ++i) {
comp = (var_18 / var_19);
comp = -1.0345E-44f * (var_20 / (var_21 - var_22 / var_23 / -1.6902E-37f));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
cudaDeviceSynchronize();
return 0;
}
|
cfa88c5feb6222aa473c99693a511a37d1090ed6.hip | // !!! This is a file automatically generated by hipify!!!
//
// auto-generated by ops.py//
//header
#include "ops_lib_cpp.h"
#include "ops_cuda_rt_support.h"
#include "ops_cuda_reduction.h"
#ifdef OPS_MPI
#include "ops_mpi_core.h"
#endif
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#undef OPS_ACC11
#undef OPS_ACC12
#undef OPS_ACC13
#undef OPS_ACC14
#undef OPS_ACC15
#undef OPS_ACC16
#undef OPS_ACC17
// global constants
__constant__ double dx;
__constant__ double dy;
void ops_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!strcmp(name,"dx")) {
cutilSafeCall(hipMemcpyToSymbol(dx, dat, dim*size));
}
else
if (!strcmp(name,"dy")) {
cutilSafeCall(hipMemcpyToSymbol(dy, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "poisson_kernel_populate_cuda_kernel.cu"
#include "poisson_kernel_initialguess_cuda_kernel.cu"
#include "poisson_kernel_stencil_cuda_kernel.cu"
#include "poisson_kernel_update_cuda_kernel.cu"
#include "poisson_kernel_error_cuda_kernel.cu"
| cfa88c5feb6222aa473c99693a511a37d1090ed6.cu | //
// auto-generated by ops.py//
//header
#include "ops_lib_cpp.h"
#include "ops_cuda_rt_support.h"
#include "ops_cuda_reduction.h"
#ifdef OPS_MPI
#include "ops_mpi_core.h"
#endif
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#undef OPS_ACC7
#undef OPS_ACC8
#undef OPS_ACC9
#undef OPS_ACC10
#undef OPS_ACC11
#undef OPS_ACC12
#undef OPS_ACC13
#undef OPS_ACC14
#undef OPS_ACC15
#undef OPS_ACC16
#undef OPS_ACC17
// global constants
__constant__ double dx;
__constant__ double dy;
void ops_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!strcmp(name,"dx")) {
cutilSafeCall(cudaMemcpyToSymbol(dx, dat, dim*size));
}
else
if (!strcmp(name,"dy")) {
cutilSafeCall(cudaMemcpyToSymbol(dy, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "poisson_kernel_populate_cuda_kernel.cu"
#include "poisson_kernel_initialguess_cuda_kernel.cu"
#include "poisson_kernel_stencil_cuda_kernel.cu"
#include "poisson_kernel_update_cuda_kernel.cu"
#include "poisson_kernel_error_cuda_kernel.cu"
|
20a0cfa9c6517e00959caf9685d28a27838a6959.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__constant__ float rcmbE[NPMLp2];
__constant__ float rcmaE[NPMLp2];
__constant__ float rcmbH[NPMLp2];
__constant__ float rcmaH[NPMLp2];
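// rcmb*/rcma* presumably hold the recursive-convolution coefficients (the b and a
// of Roden & Gedney's convolutional PML) used in the psi updates below:
// psi = b*psi + a*(field difference).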
__global__ void update_cpml_x_E( int Nx, int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *CEx, float *CEy,float *CEz, float *psi1, float *psi2, int backward ) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = Ny*Nz;
int pi = pidx/Nyz + backward*(NPML+1);
int idx = pidx + ( 1 + backward*(Nx-NPML-2) )*Nyz;
int eidx = idx + Nyz;
psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( Hz[idx+Nyz] - Hz[idx] );
Ey[eidx] -= CEy[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( Hy[idx+Nyz] - Hy[idx] );
Ez[eidx] += CEz[idx]*psi2[pidx];
}
__global__ void update_cpml_x_H( int Nx, int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *psi1, float *psi2, int backward ) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = Ny*Nz;
int pi = pidx/Nyz + 1 + backward*(NPML+1);
int idx = pidx + ( 1 + backward*(Nx-NPML-1) )*Nyz;
int eidx = idx + Nyz;
psi1[pidx] = rcmbH[pi]*psi1[pidx] + rcmaH[pi]*( Ez[eidx] - Ez[eidx-Nyz] );
Hy[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pi]*psi2[pidx] + rcmaH[pi]*( Ey[eidx] - Ey[eidx-Nyz] );
Hz[idx] -= 0.5*psi2[pidx];
}
__global__ void update_cpml_y_E( int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *CEx, float *CEy,float *CEz, float *psi1, float *psi2, int backward ) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int i = pidx/(NPML*Nz);
int pj = ( pidx/Nz )%NPML + backward*(NPML+1);
int idx = pidx + ( 1 + i*(Ny-NPML) + backward*(Ny-NPML-2) )*Nz;
int eidx = idx + Ny*Nz;
psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( Hx[idx+Nz] - Hx[idx] );
Ez[eidx] -= CEz[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( Hz[idx+Nz] - Hz[idx] );
Ex[eidx] += CEx[idx]*psi2[pidx];
}
__global__ void update_cpml_y_H( int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *psi1, float *psi2, int backward ) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int i = pidx/(NPML*Nz);
int pj = ( pidx/Nz )%NPML + 1 + backward*(NPML+1);
int idx = pidx + ( 1 + i*(Ny-NPML) + backward*(Ny-NPML-1) )*Nz;
int eidx = idx + Ny*Nz;
psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( Ex[eidx] - Ex[eidx-Nz] );
Hz[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( Ez[eidx] - Ez[eidx-Nz] );
Hx[idx] -= 0.5*psi2[pidx];
}
__global__ void update_cpml_z_E( int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *CEx, float *CEy,float *CEz, float *psi1, float *psi2, int backward ) {
int tk = threadIdx.x;
int pidx = blockIdx.x*blockDim.x + tk;
int pk = pidx%NPMLp + backward*NPMLp;
int idx = pidx + 1 + (pidx/NPMLp)*(Nz-NPMLp) + backward*(Nz-NPMLp-1);
int eidx = idx + Ny*Nz;
extern __shared__ float hs[];
float* hx = (float*) hs;
float* hy = (float*) &hx[blockDim.x+1];
hx[tk] = Hx[idx];
hy[tk] = Hy[idx];
__syncthreads();
psi1[pidx] = rcmbE[pk]*psi1[pidx] + rcmaE[pk]*( hy[tk+1] - hy[tk] );
//psi1[pidx] = rcmbE[pk]*psi1[pidx] + rcmaE[pk]*( Hy[idx+1] - Hy[idx] );
Ex[eidx] -= CEx[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pk]*psi2[pidx] + rcmaE[pk]*( hx[tk+1] - hx[tk] );
//psi2[pidx] = rcmbE[pk]*psi2[pidx] + rcmaE[pk]*( Hx[idx+1] - Hx[idx] );
Ey[eidx] += CEy[idx]*psi2[pidx];
}
__global__ void update_cpml_z_H( int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *psi1, float *psi2, int backward ) {
int tk = threadIdx.x;
int pidx = blockIdx.x*blockDim.x + tk;
int pk = pidx%NPMLp + backward*NPMLp;
int idx = pidx + (pidx/NPMLp + backward)*(Nz-NPMLp);
int eidx = idx + Ny*Nz;
extern __shared__ float es[];
float* ex = (float*) es;
float* ey = (float*) &ex[blockDim.x+1];
ex[tk+1] = Ex[eidx];
ey[tk+1] = Ey[eidx];
__syncthreads();
psi1[pidx] = rcmbH[pk]*psi1[pidx] + rcmaH[pk]*( ey[tk+1] - ey[tk] );
//psi1[pidx] = rcmbH[pk]*psi1[pidx] + rcmaH[pk]*( Ey[eidx] - Ey[eidx-1] );
Hx[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pk]*psi2[pidx] + rcmaH[pk]*( ex[tk+1] - ex[tk] );
//psi2[pidx] = rcmbH[pk]*psi2[pidx] + rcmaH[pk]*( Ex[eidx] - Ex[eidx-1] );
Hy[idx] -= 0.5*psi2[pidx];
}
| 20a0cfa9c6517e00959caf9685d28a27838a6959.cu | __constant__ float rcmbE[NPMLp2];
__constant__ float rcmaE[NPMLp2];
__constant__ float rcmbH[NPMLp2];
__constant__ float rcmaH[NPMLp2];
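// rcmb*/rcma* presumably hold the recursive-convolution coefficients (the b and a
// of Roden & Gedney's convolutional PML) used in the psi updates below:
// psi = b*psi + a*(field difference).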
__global__ void update_cpml_x_E( int Nx, int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *CEx, float *CEy,float *CEz, float *psi1, float *psi2, int backward ) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = Ny*Nz;
int pi = pidx/Nyz + backward*(NPML+1);
int idx = pidx + ( 1 + backward*(Nx-NPML-2) )*Nyz;
int eidx = idx + Nyz;
psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( Hz[idx+Nyz] - Hz[idx] );
Ey[eidx] -= CEy[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( Hy[idx+Nyz] - Hy[idx] );
Ez[eidx] += CEz[idx]*psi2[pidx];
}
__global__ void update_cpml_x_H( int Nx, int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *psi1, float *psi2, int backward ) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = Ny*Nz;
int pi = pidx/Nyz + 1 + backward*(NPML+1);
int idx = pidx + ( 1 + backward*(Nx-NPML-1) )*Nyz;
int eidx = idx + Nyz;
psi1[pidx] = rcmbH[pi]*psi1[pidx] + rcmaH[pi]*( Ez[eidx] - Ez[eidx-Nyz] );
Hy[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pi]*psi2[pidx] + rcmaH[pi]*( Ey[eidx] - Ey[eidx-Nyz] );
Hz[idx] -= 0.5*psi2[pidx];
}
__global__ void update_cpml_y_E( int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *CEx, float *CEy,float *CEz, float *psi1, float *psi2, int backward ) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int i = pidx/(NPML*Nz);
int pj = ( pidx/Nz )%NPML + backward*(NPML+1);
int idx = pidx + ( 1 + i*(Ny-NPML) + backward*(Ny-NPML-2) )*Nz;
int eidx = idx + Ny*Nz;
psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( Hx[idx+Nz] - Hx[idx] );
Ez[eidx] -= CEz[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( Hz[idx+Nz] - Hz[idx] );
Ex[eidx] += CEx[idx]*psi2[pidx];
}
__global__ void update_cpml_y_H( int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *psi1, float *psi2, int backward ) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int i = pidx/(NPML*Nz);
int pj = ( pidx/Nz )%NPML + 1 + backward*(NPML+1);
int idx = pidx + ( 1 + i*(Ny-NPML) + backward*(Ny-NPML-1) )*Nz;
int eidx = idx + Ny*Nz;
psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( Ex[eidx] - Ex[eidx-Nz] );
Hz[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( Ez[eidx] - Ez[eidx-Nz] );
Hx[idx] -= 0.5*psi2[pidx];
}
__global__ void update_cpml_z_E( int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *CEx, float *CEy,float *CEz, float *psi1, float *psi2, int backward ) {
int tk = threadIdx.x;
int pidx = blockIdx.x*blockDim.x + tk;
int pk = pidx%NPMLp + backward*NPMLp;
int idx = pidx + 1 + (pidx/NPMLp)*(Nz-NPMLp) + backward*(Nz-NPMLp-1);
int eidx = idx + Ny*Nz;
extern __shared__ float hs[];
float* hx = (float*) hs;
float* hy = (float*) &hx[blockDim.x+1];
hx[tk] = Hx[idx];
hy[tk] = Hy[idx];
__syncthreads();
psi1[pidx] = rcmbE[pk]*psi1[pidx] + rcmaE[pk]*( hy[tk+1] - hy[tk] );
//psi1[pidx] = rcmbE[pk]*psi1[pidx] + rcmaE[pk]*( Hy[idx+1] - Hy[idx] );
Ex[eidx] -= CEx[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pk]*psi2[pidx] + rcmaE[pk]*( hx[tk+1] - hx[tk] );
//psi2[pidx] = rcmbE[pk]*psi2[pidx] + rcmaE[pk]*( Hx[idx+1] - Hx[idx] );
Ey[eidx] += CEy[idx]*psi2[pidx];
}
__global__ void update_cpml_z_H( int Ny, int Nz, float *Ex, float *Ey,float *Ez, float *Hx, float *Hy,float *Hz, float *psi1, float *psi2, int backward ) {
int tk = threadIdx.x;
int pidx = blockIdx.x*blockDim.x + tk;
int pk = pidx%NPMLp + backward*NPMLp;
int idx = pidx + (pidx/NPMLp + backward)*(Nz-NPMLp);
int eidx = idx + Ny*Nz;
extern __shared__ float es[];
float* ex = (float*) es;
float* ey = (float*) &ex[blockDim.x+1];
ex[tk+1] = Ex[eidx];
ey[tk+1] = Ey[eidx];
__syncthreads();
psi1[pidx] = rcmbH[pk]*psi1[pidx] + rcmaH[pk]*( ey[tk+1] - ey[tk] );
//psi1[pidx] = rcmbH[pk]*psi1[pidx] + rcmaH[pk]*( Ey[eidx] - Ey[eidx-1] );
Hx[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pk]*psi2[pidx] + rcmaH[pk]*( ex[tk+1] - ex[tk] );
//psi2[pidx] = rcmbH[pk]*psi2[pidx] + rcmaH[pk]*( Ex[eidx] - Ex[eidx-1] );
Hy[idx] -= 0.5*psi2[pidx];
}
|
b9ea1a7ca4942b6dbee353c363ca94f929ed7a8f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe/util/mm_func.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe
{
template <typename Dtype>
void __global__ matrix_multiply_kernel(
const int M, const int N, const int K,
const Dtype* A, const Dtype* B, Dtype* C)
{
// my own matrix multiplication code
double CValue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= M || col >= N) return; // >= : row M and column N are already out of range
for(int e = 0; e < K; ++e)
CValue += (*(A + e + row * K)) * (*(B + col + e*N));
*(C + row * N + col) = CValue; // C is M x N, so rows of C are strided by N (the original used K)
/* for(int i=0; i < M; i++ )
for(int n=0; n < N; n++)
for(int j=0; j < K; j++)*/
// C[i][blockIdx.x] = a[i][threadIdx.x] + b[threadIdx.x][i];
//*(C+ N * i + blockIdx.x) = *(A + K*i + threadIdx.x) * *(B + threadIdx.x * N + i);
// *(C+N*i+n) = (*(A + K*i + j)) * (*(B + j*N + i));
//*(C + N*i + n) = A[i][j] * B[j][i];
// C[i][n] =A[i][j]* B[j][i];
}
template <typename Dtype>
void matrix_multiply(const int M, const int N, const int K,
const Dtype* A, const Dtype* B, Dtype* C)
{
hipLaunchKernelGGL(( matrix_multiply_kernel<Dtype>), dim3(4), dim3(8), 0, 0, M,N,K,A,B,C);
// matrix_multiply_kernel<Dtype>(M, N, K, A, B, C);
// kernel called here
// cuBLAS matrix multiplication function
/* caffe_gpu_gemm<Dtype>(
CblasNoTrans, CblasNoTrans, M, N, K,
(Dtype)1., A, B,(Dtype)0., C);
*/
}
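// Note: the fixed dim3(4), dim3(8) launch above covers only 32 threads in total;
// for a general M x N output the grid would have to scale with the result, e.g.
// (a sketch, not the original code) dim3 block(16, 16);
// dim3 grid((N + 15) / 16, (M + 15) / 16);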
template
void matrix_multiply<float>(
const int M, const int N, const int K,
const float* A, const float* B, float* C);
template
void matrix_multiply<double>(
const int M, const int N, const int K,
const double* A, const double* B, double* C);
}
| b9ea1a7ca4942b6dbee353c363ca94f929ed7a8f.cu | #include "caffe/util/mm_func.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe
{
template <typename Dtype>
void __global__ matrix_multiply_kernel(
const int M, const int N, const int K,
const Dtype* A, const Dtype* B, Dtype* C)
{
// my own matrix multiplication code
double CValue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if(row >= M || col >= N) return; // >= : row M and column N are already out of range
for(int e = 0; e < K; ++e)
CValue += (*(A + e + row * K)) * (*(B + col + e*N));
*(C + row * N + col) = CValue; // C is M x N, so rows of C are strided by N (the original used K)
/* for(int i=0; i < M; i++ )
for(int n=0; n < N; n++)
for(int j=0; j < K; j++)*/
// C[i][blockIdx.x] = a[i][threadIdx.x] + b[threadIdx.x][i];
//*(C+ N * i + blockIdx.x) = *(A + K*i + threadIdx.x) * *(B + threadIdx.x * N + i);
// *(C+N*i+n) = (*(A + K*i + j)) * (*(B + j*N + i));
//*(C + N*i + n) = A[i][j] * B[j][i];
// C[i][n] =A[i][j]* B[j][i];
}
template <typename Dtype>
void matrix_multiply(const int M, const int N, const int K,
const Dtype* A, const Dtype* B, Dtype* C)
{
matrix_multiply_kernel<Dtype><<<4, 8>>>(M,N,K,A,B,C);
// matrix_multiply_kernel<Dtype>(M, N, K, A, B, C);
// kernel called here
// cuBLAS matrix multiplication function
/* caffe_gpu_gemm<Dtype>(
CblasNoTrans, CblasNoTrans, M, N, K,
(Dtype)1., A, B,(Dtype)0., C);
*/
}
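// Note: the fixed <<<4, 8>>> launch above covers only 32 threads in total;
// for a general M x N output the grid would have to scale with the result, e.g.
// (a sketch, not the original code) dim3 block(16, 16);
// dim3 grid((N + 15) / 16, (M + 15) / 16);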
template
void matrix_multiply<float>(
const int M, const int N, const int K,
const float* A, const float* B, float* C);
template
void matrix_multiply<double>(
const int M, const int N, const int K,
const double* A, const double* B, double* C);
}
|
66b5d82a3c2d5bb5530bfe7b2ccf5e5790f6547b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "des_kernel_encrypt.h"
#include "des_kernel_salt_instances.h"
#ifdef DESGPU_COMPILE_ALL_SALTS
void des_25_encrypt_salt2944(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 20, 21, 22, 7, 24, 15, 16, 17, 18, 19, 20, 19, 4, 5, 6, 23, 8, 63, 32, 33, 34, 35, 36, 35, 52, 53, 54, 39, 56, 47, 48, 49, 50, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2945(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 20, 21, 22, 7, 24, 31, 16, 17, 18, 19, 20, 19, 4, 5, 6, 23, 8, 47, 32, 33, 34, 35, 36, 35, 52, 53, 54, 39, 56, 63, 48, 49, 50, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2946(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 20, 21, 22, 7, 24, 15, 0, 17, 18, 19, 20, 19, 4, 5, 6, 23, 8, 63, 48, 33, 34, 35, 36, 35, 52, 53, 54, 39, 56, 47, 32, 49, 50, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2947(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 20, 21, 22, 7, 24, 31, 0, 17, 18, 19, 20, 19, 4, 5, 6, 23, 8, 47, 48, 33, 34, 35, 36, 35, 52, 53, 54, 39, 56, 63, 32, 49, 50, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2948(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 20, 21, 22, 7, 24, 15, 16, 1, 18, 19, 20, 19, 4, 5, 6, 23, 8, 63, 32, 49, 34, 35, 36, 35, 52, 53, 54, 39, 56, 47, 48, 33, 50, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2949(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 20, 21, 22, 7, 24, 31, 16, 1, 18, 19, 20, 19, 4, 5, 6, 23, 8, 47, 32, 49, 34, 35, 36, 35, 52, 53, 54, 39, 56, 63, 48, 33, 50, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2950(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 20, 21, 22, 7, 24, 15, 0, 1, 18, 19, 20, 19, 4, 5, 6, 23, 8, 63, 48, 49, 34, 35, 36, 35, 52, 53, 54, 39, 56, 47, 32, 33, 50, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2951(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 20, 21, 22, 7, 24, 31, 0, 1, 18, 19, 20, 19, 4, 5, 6, 23, 8, 47, 48, 49, 34, 35, 36, 35, 52, 53, 54, 39, 56, 63, 32, 33, 50, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2952(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 20, 21, 22, 7, 24, 15, 16, 17, 2, 19, 20, 19, 4, 5, 6, 23, 8, 63, 32, 33, 50, 35, 36, 35, 52, 53, 54, 39, 56, 47, 48, 49, 34, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2953(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 20, 21, 22, 7, 24, 31, 16, 17, 2, 19, 20, 19, 4, 5, 6, 23, 8, 47, 32, 33, 50, 35, 36, 35, 52, 53, 54, 39, 56, 63, 48, 49, 34, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2954(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 20, 21, 22, 7, 24, 15, 0, 17, 2, 19, 20, 19, 4, 5, 6, 23, 8, 63, 48, 33, 50, 35, 36, 35, 52, 53, 54, 39, 56, 47, 32, 49, 34, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2955(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 20, 21, 22, 7, 24, 31, 0, 17, 2, 19, 20, 19, 4, 5, 6, 23, 8, 47, 48, 33, 50, 35, 36, 35, 52, 53, 54, 39, 56, 63, 32, 49, 34, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2956(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 20, 21, 22, 7, 24, 15, 16, 1, 2, 19, 20, 19, 4, 5, 6, 23, 8, 63, 32, 49, 50, 35, 36, 35, 52, 53, 54, 39, 56, 47, 48, 33, 34, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2957(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 20, 21, 22, 7, 24, 31, 16, 1, 2, 19, 20, 19, 4, 5, 6, 23, 8, 47, 32, 49, 50, 35, 36, 35, 52, 53, 54, 39, 56, 63, 48, 33, 34, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2958(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 20, 21, 22, 7, 24, 15, 0, 1, 2, 19, 20, 19, 4, 5, 6, 23, 8, 63, 48, 49, 50, 35, 36, 35, 52, 53, 54, 39, 56, 47, 32, 33, 34, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2959(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 20, 21, 22, 7, 24, 31, 0, 1, 2, 19, 20, 19, 4, 5, 6, 23, 8, 47, 48, 49, 50, 35, 36, 35, 52, 53, 54, 39, 56, 63, 32, 33, 34, 51, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2960(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 20, 21, 22, 7, 24, 15, 16, 17, 18, 3, 20, 19, 4, 5, 6, 23, 8, 63, 32, 33, 34, 51, 36, 35, 52, 53, 54, 39, 56, 47, 48, 49, 50, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2961(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 20, 21, 22, 7, 24, 31, 16, 17, 18, 3, 20, 19, 4, 5, 6, 23, 8, 47, 32, 33, 34, 51, 36, 35, 52, 53, 54, 39, 56, 63, 48, 49, 50, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2962(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 20, 21, 22, 7, 24, 15, 0, 17, 18, 3, 20, 19, 4, 5, 6, 23, 8, 63, 48, 33, 34, 51, 36, 35, 52, 53, 54, 39, 56, 47, 32, 49, 50, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2963(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 20, 21, 22, 7, 24, 31, 0, 17, 18, 3, 20, 19, 4, 5, 6, 23, 8, 47, 48, 33, 34, 51, 36, 35, 52, 53, 54, 39, 56, 63, 32, 49, 50, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2964(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 20, 21, 22, 7, 24, 15, 16, 1, 18, 3, 20, 19, 4, 5, 6, 23, 8, 63, 32, 49, 34, 51, 36, 35, 52, 53, 54, 39, 56, 47, 48, 33, 50, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2965(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 20, 21, 22, 7, 24, 31, 16, 1, 18, 3, 20, 19, 4, 5, 6, 23, 8, 47, 32, 49, 34, 51, 36, 35, 52, 53, 54, 39, 56, 63, 48, 33, 50, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2966(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 20, 21, 22, 7, 24, 15, 0, 1, 18, 3, 20, 19, 4, 5, 6, 23, 8, 63, 48, 49, 34, 51, 36, 35, 52, 53, 54, 39, 56, 47, 32, 33, 50, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2967(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 20, 21, 22, 7, 24, 31, 0, 1, 18, 3, 20, 19, 4, 5, 6, 23, 8, 47, 48, 49, 34, 51, 36, 35, 52, 53, 54, 39, 56, 63, 32, 33, 50, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2968(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 20, 21, 22, 7, 24, 15, 16, 17, 2, 3, 20, 19, 4, 5, 6, 23, 8, 63, 32, 33, 50, 51, 36, 35, 52, 53, 54, 39, 56, 47, 48, 49, 34, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2969(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 20, 21, 22, 7, 24, 31, 16, 17, 2, 3, 20, 19, 4, 5, 6, 23, 8, 47, 32, 33, 50, 51, 36, 35, 52, 53, 54, 39, 56, 63, 48, 49, 34, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2970(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 20, 21, 22, 7, 24, 15, 0, 17, 2, 3, 20, 19, 4, 5, 6, 23, 8, 63, 48, 33, 50, 51, 36, 35, 52, 53, 54, 39, 56, 47, 32, 49, 34, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2971(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 20, 21, 22, 7, 24, 31, 0, 17, 2, 3, 20, 19, 4, 5, 6, 23, 8, 47, 48, 33, 50, 51, 36, 35, 52, 53, 54, 39, 56, 63, 32, 49, 34, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2972(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 20, 21, 22, 7, 24, 15, 16, 1, 2, 3, 20, 19, 4, 5, 6, 23, 8, 63, 32, 49, 50, 51, 36, 35, 52, 53, 54, 39, 56, 47, 48, 33, 34, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2973(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 20, 21, 22, 7, 24, 31, 16, 1, 2, 3, 20, 19, 4, 5, 6, 23, 8, 47, 32, 49, 50, 51, 36, 35, 52, 53, 54, 39, 56, 63, 48, 33, 34, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2974(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 20, 21, 22, 7, 24, 15, 0, 1, 2, 3, 20, 19, 4, 5, 6, 23, 8, 63, 48, 49, 50, 51, 36, 35, 52, 53, 54, 39, 56, 47, 32, 33, 34, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2975(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 20, 21, 22, 7, 24, 31, 0, 1, 2, 3, 20, 19, 4, 5, 6, 23, 8, 47, 48, 49, 50, 51, 36, 35, 52, 53, 54, 39, 56, 63, 32, 33, 34, 35, 52, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2976(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 20, 21, 22, 7, 24, 15, 16, 17, 18, 19, 4, 19, 4, 5, 6, 23, 8, 63, 32, 33, 34, 35, 52, 35, 52, 53, 54, 39, 56, 47, 48, 49, 50, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2977(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 20, 21, 22, 7, 24, 31, 16, 17, 18, 19, 4, 19, 4, 5, 6, 23, 8, 47, 32, 33, 34, 35, 52, 35, 52, 53, 54, 39, 56, 63, 48, 49, 50, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2978(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 20, 21, 22, 7, 24, 15, 0, 17, 18, 19, 4, 19, 4, 5, 6, 23, 8, 63, 48, 33, 34, 35, 52, 35, 52, 53, 54, 39, 56, 47, 32, 49, 50, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2979(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 20, 21, 22, 7, 24, 31, 0, 17, 18, 19, 4, 19, 4, 5, 6, 23, 8, 47, 48, 33, 34, 35, 52, 35, 52, 53, 54, 39, 56, 63, 32, 49, 50, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2980(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 20, 21, 22, 7, 24, 15, 16, 1, 18, 19, 4, 19, 4, 5, 6, 23, 8, 63, 32, 49, 34, 35, 52, 35, 52, 53, 54, 39, 56, 47, 48, 33, 50, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2981(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 20, 21, 22, 7, 24, 31, 16, 1, 18, 19, 4, 19, 4, 5, 6, 23, 8, 47, 32, 49, 34, 35, 52, 35, 52, 53, 54, 39, 56, 63, 48, 33, 50, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2982(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 20, 21, 22, 7, 24, 15, 0, 1, 18, 19, 4, 19, 4, 5, 6, 23, 8, 63, 48, 49, 34, 35, 52, 35, 52, 53, 54, 39, 56, 47, 32, 33, 50, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2983(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 20, 21, 22, 7, 24, 31, 0, 1, 18, 19, 4, 19, 4, 5, 6, 23, 8, 47, 48, 49, 34, 35, 52, 35, 52, 53, 54, 39, 56, 63, 32, 33, 50, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2984(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 20, 21, 22, 7, 24, 15, 16, 17, 2, 19, 4, 19, 4, 5, 6, 23, 8, 63, 32, 33, 50, 35, 52, 35, 52, 53, 54, 39, 56, 47, 48, 49, 34, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2985(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 20, 21, 22, 7, 24, 31, 16, 17, 2, 19, 4, 19, 4, 5, 6, 23, 8, 47, 32, 33, 50, 35, 52, 35, 52, 53, 54, 39, 56, 63, 48, 49, 34, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2986(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 20, 21, 22, 7, 24, 15, 0, 17, 2, 19, 4, 19, 4, 5, 6, 23, 8, 63, 48, 33, 50, 35, 52, 35, 52, 53, 54, 39, 56, 47, 32, 49, 34, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2987(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 20, 21, 22, 7, 24, 31, 0, 17, 2, 19, 4, 19, 4, 5, 6, 23, 8, 47, 48, 33, 50, 35, 52, 35, 52, 53, 54, 39, 56, 63, 32, 49, 34, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2988(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 20, 21, 22, 7, 24, 15, 16, 1, 2, 19, 4, 19, 4, 5, 6, 23, 8, 63, 32, 49, 50, 35, 52, 35, 52, 53, 54, 39, 56, 47, 48, 33, 34, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2989(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 20, 21, 22, 7, 24, 31, 16, 1, 2, 19, 4, 19, 4, 5, 6, 23, 8, 47, 32, 49, 50, 35, 52, 35, 52, 53, 54, 39, 56, 63, 48, 33, 34, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2990(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 20, 21, 22, 7, 24, 15, 0, 1, 2, 19, 4, 19, 4, 5, 6, 23, 8, 63, 48, 49, 50, 35, 52, 35, 52, 53, 54, 39, 56, 47, 32, 33, 34, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2991(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 20, 21, 22, 7, 24, 31, 0, 1, 2, 19, 4, 19, 4, 5, 6, 23, 8, 47, 48, 49, 50, 35, 52, 35, 52, 53, 54, 39, 56, 63, 32, 33, 34, 51, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2992(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 20, 21, 22, 7, 24, 15, 16, 17, 18, 3, 4, 19, 4, 5, 6, 23, 8, 63, 32, 33, 34, 51, 52, 35, 52, 53, 54, 39, 56, 47, 48, 49, 50, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2993(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 20, 21, 22, 7, 24, 31, 16, 17, 18, 3, 4, 19, 4, 5, 6, 23, 8, 47, 32, 33, 34, 51, 52, 35, 52, 53, 54, 39, 56, 63, 48, 49, 50, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2994(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 20, 21, 22, 7, 24, 15, 0, 17, 18, 3, 4, 19, 4, 5, 6, 23, 8, 63, 48, 33, 34, 51, 52, 35, 52, 53, 54, 39, 56, 47, 32, 49, 50, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2995(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 20, 21, 22, 7, 24, 31, 0, 17, 18, 3, 4, 19, 4, 5, 6, 23, 8, 47, 48, 33, 34, 51, 52, 35, 52, 53, 54, 39, 56, 63, 32, 49, 50, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2996(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 20, 21, 22, 7, 24, 15, 16, 1, 18, 3, 4, 19, 4, 5, 6, 23, 8, 63, 32, 49, 34, 51, 52, 35, 52, 53, 54, 39, 56, 47, 48, 33, 50, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2997(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 20, 21, 22, 7, 24, 31, 16, 1, 18, 3, 4, 19, 4, 5, 6, 23, 8, 47, 32, 49, 34, 51, 52, 35, 52, 53, 54, 39, 56, 63, 48, 33, 50, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2998(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 20, 21, 22, 7, 24, 15, 0, 1, 18, 3, 4, 19, 4, 5, 6, 23, 8, 63, 48, 49, 34, 51, 52, 35, 52, 53, 54, 39, 56, 47, 32, 33, 50, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2999(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 20, 21, 22, 7, 24, 31, 0, 1, 18, 3, 4, 19, 4, 5, 6, 23, 8, 47, 48, 49, 34, 51, 52, 35, 52, 53, 54, 39, 56, 63, 32, 33, 50, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3000(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 20, 21, 22, 7, 24, 15, 16, 17, 2, 3, 4, 19, 4, 5, 6, 23, 8, 63, 32, 33, 50, 51, 52, 35, 52, 53, 54, 39, 56, 47, 48, 49, 34, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3001(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 20, 21, 22, 7, 24, 31, 16, 17, 2, 3, 4, 19, 4, 5, 6, 23, 8, 47, 32, 33, 50, 51, 52, 35, 52, 53, 54, 39, 56, 63, 48, 49, 34, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3002(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 20, 21, 22, 7, 24, 15, 0, 17, 2, 3, 4, 19, 4, 5, 6, 23, 8, 63, 48, 33, 50, 51, 52, 35, 52, 53, 54, 39, 56, 47, 32, 49, 34, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3003(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 20, 21, 22, 7, 24, 31, 0, 17, 2, 3, 4, 19, 4, 5, 6, 23, 8, 47, 48, 33, 50, 51, 52, 35, 52, 53, 54, 39, 56, 63, 32, 49, 34, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3004(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 20, 21, 22, 7, 24, 15, 16, 1, 2, 3, 4, 19, 4, 5, 6, 23, 8, 63, 32, 49, 50, 51, 52, 35, 52, 53, 54, 39, 56, 47, 48, 33, 34, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3005(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 20, 21, 22, 7, 24, 31, 16, 1, 2, 3, 4, 19, 4, 5, 6, 23, 8, 47, 32, 49, 50, 51, 52, 35, 52, 53, 54, 39, 56, 63, 48, 33, 34, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3006(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 20, 21, 22, 7, 24, 15, 0, 1, 2, 3, 4, 19, 4, 5, 6, 23, 8, 63, 48, 49, 50, 51, 52, 35, 52, 53, 54, 39, 56, 47, 32, 33, 34, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3007(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 20, 21, 22, 7, 24, 31, 0, 1, 2, 3, 4, 19, 4, 5, 6, 23, 8, 47, 48, 49, 50, 51, 52, 35, 52, 53, 54, 39, 56, 63, 32, 33, 34, 35, 36, 51, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3008(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 20, 21, 22, 7, 24, 15, 16, 17, 18, 19, 20, 3, 4, 5, 6, 23, 8, 63, 32, 33, 34, 35, 36, 51, 52, 53, 54, 39, 56, 47, 48, 49, 50, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3009(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 20, 21, 22, 7, 24, 31, 16, 17, 18, 19, 20, 3, 4, 5, 6, 23, 8, 47, 32, 33, 34, 35, 36, 51, 52, 53, 54, 39, 56, 63, 48, 49, 50, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3010(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 20, 21, 22, 7, 24, 15, 0, 17, 18, 19, 20, 3, 4, 5, 6, 23, 8, 63, 48, 33, 34, 35, 36, 51, 52, 53, 54, 39, 56, 47, 32, 49, 50, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3011(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 20, 21, 22, 7, 24, 31, 0, 17, 18, 19, 20, 3, 4, 5, 6, 23, 8, 47, 48, 33, 34, 35, 36, 51, 52, 53, 54, 39, 56, 63, 32, 49, 50, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3012(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 20, 21, 22, 7, 24, 15, 16, 1, 18, 19, 20, 3, 4, 5, 6, 23, 8, 63, 32, 49, 34, 35, 36, 51, 52, 53, 54, 39, 56, 47, 48, 33, 50, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3013(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 20, 21, 22, 7, 24, 31, 16, 1, 18, 19, 20, 3, 4, 5, 6, 23, 8, 47, 32, 49, 34, 35, 36, 51, 52, 53, 54, 39, 56, 63, 48, 33, 50, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3014(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 20, 21, 22, 7, 24, 15, 0, 1, 18, 19, 20, 3, 4, 5, 6, 23, 8, 63, 48, 49, 34, 35, 36, 51, 52, 53, 54, 39, 56, 47, 32, 33, 50, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3015(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 20, 21, 22, 7, 24, 31, 0, 1, 18, 19, 20, 3, 4, 5, 6, 23, 8, 47, 48, 49, 34, 35, 36, 51, 52, 53, 54, 39, 56, 63, 32, 33, 50, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3016(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 20, 21, 22, 7, 24, 15, 16, 17, 2, 19, 20, 3, 4, 5, 6, 23, 8, 63, 32, 33, 50, 35, 36, 51, 52, 53, 54, 39, 56, 47, 48, 49, 34, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3017(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 20, 21, 22, 7, 24, 31, 16, 17, 2, 19, 20, 3, 4, 5, 6, 23, 8, 47, 32, 33, 50, 35, 36, 51, 52, 53, 54, 39, 56, 63, 48, 49, 34, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3018(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 20, 21, 22, 7, 24, 15, 0, 17, 2, 19, 20, 3, 4, 5, 6, 23, 8, 63, 48, 33, 50, 35, 36, 51, 52, 53, 54, 39, 56, 47, 32, 49, 34, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3019(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 20, 21, 22, 7, 24, 31, 0, 17, 2, 19, 20, 3, 4, 5, 6, 23, 8, 47, 48, 33, 50, 35, 36, 51, 52, 53, 54, 39, 56, 63, 32, 49, 34, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3020(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 20, 21, 22, 7, 24, 15, 16, 1, 2, 19, 20, 3, 4, 5, 6, 23, 8, 63, 32, 49, 50, 35, 36, 51, 52, 53, 54, 39, 56, 47, 48, 33, 34, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3021(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 20, 21, 22, 7, 24, 31, 16, 1, 2, 19, 20, 3, 4, 5, 6, 23, 8, 47, 32, 49, 50, 35, 36, 51, 52, 53, 54, 39, 56, 63, 48, 33, 34, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3022(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 20, 21, 22, 7, 24, 15, 0, 1, 2, 19, 20, 3, 4, 5, 6, 23, 8, 63, 48, 49, 50, 35, 36, 51, 52, 53, 54, 39, 56, 47, 32, 33, 34, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3023(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 20, 21, 22, 7, 24, 31, 0, 1, 2, 19, 20, 3, 4, 5, 6, 23, 8, 47, 48, 49, 50, 35, 36, 51, 52, 53, 54, 39, 56, 63, 32, 33, 34, 51, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3024(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 20, 21, 22, 7, 24, 15, 16, 17, 18, 3, 20, 3, 4, 5, 6, 23, 8, 63, 32, 33, 34, 51, 36, 51, 52, 53, 54, 39, 56, 47, 48, 49, 50, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3025(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 20, 21, 22, 7, 24, 31, 16, 17, 18, 3, 20, 3, 4, 5, 6, 23, 8, 47, 32, 33, 34, 51, 36, 51, 52, 53, 54, 39, 56, 63, 48, 49, 50, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3026(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 20, 21, 22, 7, 24, 15, 0, 17, 18, 3, 20, 3, 4, 5, 6, 23, 8, 63, 48, 33, 34, 51, 36, 51, 52, 53, 54, 39, 56, 47, 32, 49, 50, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3027(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 20, 21, 22, 7, 24, 31, 0, 17, 18, 3, 20, 3, 4, 5, 6, 23, 8, 47, 48, 33, 34, 51, 36, 51, 52, 53, 54, 39, 56, 63, 32, 49, 50, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3028(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 20, 21, 22, 7, 24, 15, 16, 1, 18, 3, 20, 3, 4, 5, 6, 23, 8, 63, 32, 49, 34, 51, 36, 51, 52, 53, 54, 39, 56, 47, 48, 33, 50, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3029(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 20, 21, 22, 7, 24, 31, 16, 1, 18, 3, 20, 3, 4, 5, 6, 23, 8, 47, 32, 49, 34, 51, 36, 51, 52, 53, 54, 39, 56, 63, 48, 33, 50, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3030(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 20, 21, 22, 7, 24, 15, 0, 1, 18, 3, 20, 3, 4, 5, 6, 23, 8, 63, 48, 49, 34, 51, 36, 51, 52, 53, 54, 39, 56, 47, 32, 33, 50, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3031(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 20, 21, 22, 7, 24, 31, 0, 1, 18, 3, 20, 3, 4, 5, 6, 23, 8, 47, 48, 49, 34, 51, 36, 51, 52, 53, 54, 39, 56, 63, 32, 33, 50, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3032(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 20, 21, 22, 7, 24, 15, 16, 17, 2, 3, 20, 3, 4, 5, 6, 23, 8, 63, 32, 33, 50, 51, 36, 51, 52, 53, 54, 39, 56, 47, 48, 49, 34, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3033(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 20, 21, 22, 7, 24, 31, 16, 17, 2, 3, 20, 3, 4, 5, 6, 23, 8, 47, 32, 33, 50, 51, 36, 51, 52, 53, 54, 39, 56, 63, 48, 49, 34, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3034(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 20, 21, 22, 7, 24, 15, 0, 17, 2, 3, 20, 3, 4, 5, 6, 23, 8, 63, 48, 33, 50, 51, 36, 51, 52, 53, 54, 39, 56, 47, 32, 49, 34, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3035(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 20, 21, 22, 7, 24, 31, 0, 17, 2, 3, 20, 3, 4, 5, 6, 23, 8, 47, 48, 33, 50, 51, 36, 51, 52, 53, 54, 39, 56, 63, 32, 49, 34, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3036(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 20, 21, 22, 7, 24, 15, 16, 1, 2, 3, 20, 3, 4, 5, 6, 23, 8, 63, 32, 49, 50, 51, 36, 51, 52, 53, 54, 39, 56, 47, 48, 33, 34, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3037(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 20, 21, 22, 7, 24, 31, 16, 1, 2, 3, 20, 3, 4, 5, 6, 23, 8, 47, 32, 49, 50, 51, 36, 51, 52, 53, 54, 39, 56, 63, 48, 33, 34, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3038(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 20, 21, 22, 7, 24, 15, 0, 1, 2, 3, 20, 3, 4, 5, 6, 23, 8, 63, 48, 49, 50, 51, 36, 51, 52, 53, 54, 39, 56, 47, 32, 33, 34, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3039(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 20, 21, 22, 7, 24, 31, 0, 1, 2, 3, 20, 3, 4, 5, 6, 23, 8, 47, 48, 49, 50, 51, 36, 51, 52, 53, 54, 39, 56, 63, 32, 33, 34, 35, 52, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3040(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 20, 21, 22, 7, 24, 15, 16, 17, 18, 19, 4, 3, 4, 5, 6, 23, 8, 63, 32, 33, 34, 35, 52, 51, 52, 53, 54, 39, 56, 47, 48, 49, 50, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3041(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 20, 21, 22, 7, 24, 31, 16, 17, 18, 19, 4, 3, 4, 5, 6, 23, 8, 47, 32, 33, 34, 35, 52, 51, 52, 53, 54, 39, 56, 63, 48, 49, 50, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3042(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 20, 21, 22, 7, 24, 15, 0, 17, 18, 19, 4, 3, 4, 5, 6, 23, 8, 63, 48, 33, 34, 35, 52, 51, 52, 53, 54, 39, 56, 47, 32, 49, 50, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3043(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 20, 21, 22, 7, 24, 31, 0, 17, 18, 19, 4, 3, 4, 5, 6, 23, 8, 47, 48, 33, 34, 35, 52, 51, 52, 53, 54, 39, 56, 63, 32, 49, 50, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3044(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 20, 21, 22, 7, 24, 15, 16, 1, 18, 19, 4, 3, 4, 5, 6, 23, 8, 63, 32, 49, 34, 35, 52, 51, 52, 53, 54, 39, 56, 47, 48, 33, 50, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3045(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 20, 21, 22, 7, 24, 31, 16, 1, 18, 19, 4, 3, 4, 5, 6, 23, 8, 47, 32, 49, 34, 35, 52, 51, 52, 53, 54, 39, 56, 63, 48, 33, 50, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3046(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 20, 21, 22, 7, 24, 15, 0, 1, 18, 19, 4, 3, 4, 5, 6, 23, 8, 63, 48, 49, 34, 35, 52, 51, 52, 53, 54, 39, 56, 47, 32, 33, 50, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3047(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 20, 21, 22, 7, 24, 31, 0, 1, 18, 19, 4, 3, 4, 5, 6, 23, 8, 47, 48, 49, 34, 35, 52, 51, 52, 53, 54, 39, 56, 63, 32, 33, 50, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3048(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 20, 21, 22, 7, 24, 15, 16, 17, 2, 19, 4, 3, 4, 5, 6, 23, 8, 63, 32, 33, 50, 35, 52, 51, 52, 53, 54, 39, 56, 47, 48, 49, 34, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3049(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 20, 21, 22, 7, 24, 31, 16, 17, 2, 19, 4, 3, 4, 5, 6, 23, 8, 47, 32, 33, 50, 35, 52, 51, 52, 53, 54, 39, 56, 63, 48, 49, 34, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3050(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 20, 21, 22, 7, 24, 15, 0, 17, 2, 19, 4, 3, 4, 5, 6, 23, 8, 63, 48, 33, 50, 35, 52, 51, 52, 53, 54, 39, 56, 47, 32, 49, 34, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3051(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 20, 21, 22, 7, 24, 31, 0, 17, 2, 19, 4, 3, 4, 5, 6, 23, 8, 47, 48, 33, 50, 35, 52, 51, 52, 53, 54, 39, 56, 63, 32, 49, 34, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3052(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 20, 21, 22, 7, 24, 15, 16, 1, 2, 19, 4, 3, 4, 5, 6, 23, 8, 63, 32, 49, 50, 35, 52, 51, 52, 53, 54, 39, 56, 47, 48, 33, 34, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3053(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 20, 21, 22, 7, 24, 31, 16, 1, 2, 19, 4, 3, 4, 5, 6, 23, 8, 47, 32, 49, 50, 35, 52, 51, 52, 53, 54, 39, 56, 63, 48, 33, 34, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3054(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 20, 21, 22, 7, 24, 15, 0, 1, 2, 19, 4, 3, 4, 5, 6, 23, 8, 63, 48, 49, 50, 35, 52, 51, 52, 53, 54, 39, 56, 47, 32, 33, 34, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3055(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 20, 21, 22, 7, 24, 31, 0, 1, 2, 19, 4, 3, 4, 5, 6, 23, 8, 47, 48, 49, 50, 35, 52, 51, 52, 53, 54, 39, 56, 63, 32, 33, 34, 51, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3056(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 20, 21, 22, 7, 24, 15, 16, 17, 18, 3, 4, 3, 4, 5, 6, 23, 8, 63, 32, 33, 34, 51, 52, 51, 52, 53, 54, 39, 56, 47, 48, 49, 50, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3057(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 20, 21, 22, 7, 24, 31, 16, 17, 18, 3, 4, 3, 4, 5, 6, 23, 8, 47, 32, 33, 34, 51, 52, 51, 52, 53, 54, 39, 56, 63, 48, 49, 50, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3058(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 20, 21, 22, 7, 24, 15, 0, 17, 18, 3, 4, 3, 4, 5, 6, 23, 8, 63, 48, 33, 34, 51, 52, 51, 52, 53, 54, 39, 56, 47, 32, 49, 50, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3059(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 20, 21, 22, 7, 24, 31, 0, 17, 18, 3, 4, 3, 4, 5, 6, 23, 8, 47, 48, 33, 34, 51, 52, 51, 52, 53, 54, 39, 56, 63, 32, 49, 50, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3060(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 20, 21, 22, 7, 24, 15, 16, 1, 18, 3, 4, 3, 4, 5, 6, 23, 8, 63, 32, 49, 34, 51, 52, 51, 52, 53, 54, 39, 56, 47, 48, 33, 50, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3061(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 20, 21, 22, 7, 24, 31, 16, 1, 18, 3, 4, 3, 4, 5, 6, 23, 8, 47, 32, 49, 34, 51, 52, 51, 52, 53, 54, 39, 56, 63, 48, 33, 50, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3062(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 20, 21, 22, 7, 24, 15, 0, 1, 18, 3, 4, 3, 4, 5, 6, 23, 8, 63, 48, 49, 34, 51, 52, 51, 52, 53, 54, 39, 56, 47, 32, 33, 50, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3063(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 20, 21, 22, 7, 24, 31, 0, 1, 18, 3, 4, 3, 4, 5, 6, 23, 8, 47, 48, 49, 34, 51, 52, 51, 52, 53, 54, 39, 56, 63, 32, 33, 50, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3064(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 20, 21, 22, 7, 24, 15, 16, 17, 2, 3, 4, 3, 4, 5, 6, 23, 8, 63, 32, 33, 50, 51, 52, 51, 52, 53, 54, 39, 56, 47, 48, 49, 34, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3065(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 20, 21, 22, 7, 24, 31, 16, 17, 2, 3, 4, 3, 4, 5, 6, 23, 8, 47, 32, 33, 50, 51, 52, 51, 52, 53, 54, 39, 56, 63, 48, 49, 34, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3066(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 20, 21, 22, 7, 24, 15, 0, 17, 2, 3, 4, 3, 4, 5, 6, 23, 8, 63, 48, 33, 50, 51, 52, 51, 52, 53, 54, 39, 56, 47, 32, 49, 34, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3067(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 20, 21, 22, 7, 24, 31, 0, 17, 2, 3, 4, 3, 4, 5, 6, 23, 8, 47, 48, 33, 50, 51, 52, 51, 52, 53, 54, 39, 56, 63, 32, 49, 34, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3068(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 20, 21, 22, 7, 24, 15, 16, 1, 2, 3, 4, 3, 4, 5, 6, 23, 8, 63, 32, 49, 50, 51, 52, 51, 52, 53, 54, 39, 56, 47, 48, 33, 34, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3069(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 20, 21, 22, 7, 24, 31, 16, 1, 2, 3, 4, 3, 4, 5, 6, 23, 8, 47, 32, 49, 50, 51, 52, 51, 52, 53, 54, 39, 56, 63, 48, 33, 34, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3070(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 20, 21, 22, 7, 24, 15, 0, 1, 2, 3, 4, 3, 4, 5, 6, 23, 8, 63, 48, 49, 50, 51, 52, 51, 52, 53, 54, 39, 56, 47, 32, 33, 34, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3071(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 20, 21, 22, 7, 24, 31, 0, 1, 2, 3, 4, 3, 4, 5, 6, 23, 8, 47, 48, 49, 50, 51, 52, 51, 52, 53, 54, 39, 56, 63, 32, 33, 34, 35, 36, 35, 36, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
}
#endif // DESGPU_COMPILE_ALL_SALTS
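#if 0
// Illustrative sketch only (not part of the generated file): one plausible
// way these per-salt wrappers get consumed is a table indexed by the salt
// value. The names des_25_encrypt_fn and dispatch_salt are hypothetical;
// a real table would cover every generated salt, three entries shown here.
typedef void (*des_25_encrypt_fn)(const size_t, const size_t,
                                  vtype* const, const vtype* const);
static void dispatch_salt(const unsigned salt, const size_t num_blocks,
                          const size_t threads_per_block,
                          vtype* const unchecked_hashes,
                          const vtype* const bitsplitted_keys)
{
    static const des_25_encrypt_fn table[] = {
        des_25_encrypt_salt2975, des_25_encrypt_salt2976, des_25_encrypt_salt2977,
    };
    // Select the kernel wrapper specialized for this salt and launch it.
    table[salt - 2975](num_blocks, threads_per_block,
                       unchecked_hashes, bitsplitted_keys);
}
#endif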
| 66b5d82a3c2d5bb5530bfe7b2ccf5e5790f6547b.cu | #include "des_kernel_encrypt.h"
#include "des_kernel_salt_instances.h"
#ifdef DESGPU_COMPILE_ALL_SALTS
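// The wrappers below are the original CUDA <<<grid, block>>> launches for
// this file; the .hip counterpart above carries the same per-salt
// instantiations rewritten by hipify into hipLaunchKernelGGL calls.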
void des_25_encrypt_salt2944(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 20, 21, 22, 7, 24, 15, 16, 17, 18, 19, 20, 19, 4, 5, 6, 23, 8, 63, 32, 33, 34, 35, 36, 35, 52, 53, 54, 39, 56, 47, 48, 49, 50, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2945(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 20, 21, 22, 7, 24, 31, 16, 17, 18, 19, 20, 19, 4, 5, 6, 23, 8, 47, 32, 33, 34, 35, 36, 35, 52, 53, 54, 39, 56, 63, 48, 49, 50, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2946(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 20, 21, 22, 7, 24, 15, 0, 17, 18, 19, 20, 19, 4, 5, 6, 23, 8, 63, 48, 33, 34, 35, 36, 35, 52, 53, 54, 39, 56, 47, 32, 49, 50, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2947(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 20, 21, 22, 7, 24, 31, 0, 17, 18, 19, 20, 19, 4, 5, 6, 23, 8, 47, 48, 33, 34, 35, 36, 35, 52, 53, 54, 39, 56, 63, 32, 49, 50, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2948(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 20, 21, 22, 7, 24, 15, 16, 1, 18, 19, 20, 19, 4, 5, 6, 23, 8, 63, 32, 49, 34, 35, 36, 35, 52, 53, 54, 39, 56, 47, 48, 33, 50, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2949(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 20, 21, 22, 7, 24, 31, 16, 1, 18, 19, 20, 19, 4, 5, 6, 23, 8, 47, 32, 49, 34, 35, 36, 35, 52, 53, 54, 39, 56, 63, 48, 33, 50, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2950(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 20, 21, 22, 7, 24, 15, 0, 1, 18, 19, 20, 19, 4, 5, 6, 23, 8, 63, 48, 49, 34, 35, 36, 35, 52, 53, 54, 39, 56, 47, 32, 33, 50, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2951(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 20, 21, 22, 7, 24, 31, 0, 1, 18, 19, 20, 19, 4, 5, 6, 23, 8, 47, 48, 49, 34, 35, 36, 35, 52, 53, 54, 39, 56, 63, 32, 33, 50, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2952(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 20, 21, 22, 7, 24, 15, 16, 17, 2, 19, 20, 19, 4, 5, 6, 23, 8, 63, 32, 33, 50, 35, 36, 35, 52, 53, 54, 39, 56, 47, 48, 49, 34, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2953(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 20, 21, 22, 7, 24, 31, 16, 17, 2, 19, 20, 19, 4, 5, 6, 23, 8, 47, 32, 33, 50, 35, 36, 35, 52, 53, 54, 39, 56, 63, 48, 49, 34, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2954(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 20, 21, 22, 7, 24, 15, 0, 17, 2, 19, 20, 19, 4, 5, 6, 23, 8, 63, 48, 33, 50, 35, 36, 35, 52, 53, 54, 39, 56, 47, 32, 49, 34, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2955(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 20, 21, 22, 7, 24, 31, 0, 17, 2, 19, 20, 19, 4, 5, 6, 23, 8, 47, 48, 33, 50, 35, 36, 35, 52, 53, 54, 39, 56, 63, 32, 49, 34, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2956(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 20, 21, 22, 7, 24, 15, 16, 1, 2, 19, 20, 19, 4, 5, 6, 23, 8, 63, 32, 49, 50, 35, 36, 35, 52, 53, 54, 39, 56, 47, 48, 33, 34, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2957(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 20, 21, 22, 7, 24, 31, 16, 1, 2, 19, 20, 19, 4, 5, 6, 23, 8, 47, 32, 49, 50, 35, 36, 35, 52, 53, 54, 39, 56, 63, 48, 33, 34, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2958(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 20, 21, 22, 7, 24, 15, 0, 1, 2, 19, 20, 19, 4, 5, 6, 23, 8, 63, 48, 49, 50, 35, 36, 35, 52, 53, 54, 39, 56, 47, 32, 33, 34, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2959(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 20, 21, 22, 7, 24, 31, 0, 1, 2, 19, 20, 19, 4, 5, 6, 23, 8, 47, 48, 49, 50, 35, 36, 35, 52, 53, 54, 39, 56, 63, 32, 33, 34, 51, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2960(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 20, 21, 22, 7, 24, 15, 16, 17, 18, 3, 20, 19, 4, 5, 6, 23, 8, 63, 32, 33, 34, 51, 36, 35, 52, 53, 54, 39, 56, 47, 48, 49, 50, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2961(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 20, 21, 22, 7, 24, 31, 16, 17, 18, 3, 20, 19, 4, 5, 6, 23, 8, 47, 32, 33, 34, 51, 36, 35, 52, 53, 54, 39, 56, 63, 48, 49, 50, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2962(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 20, 21, 22, 7, 24, 15, 0, 17, 18, 3, 20, 19, 4, 5, 6, 23, 8, 63, 48, 33, 34, 51, 36, 35, 52, 53, 54, 39, 56, 47, 32, 49, 50, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2963(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 20, 21, 22, 7, 24, 31, 0, 17, 18, 3, 20, 19, 4, 5, 6, 23, 8, 47, 48, 33, 34, 51, 36, 35, 52, 53, 54, 39, 56, 63, 32, 49, 50, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2964(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 20, 21, 22, 7, 24, 15, 16, 1, 18, 3, 20, 19, 4, 5, 6, 23, 8, 63, 32, 49, 34, 51, 36, 35, 52, 53, 54, 39, 56, 47, 48, 33, 50, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2965(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 20, 21, 22, 7, 24, 31, 16, 1, 18, 3, 20, 19, 4, 5, 6, 23, 8, 47, 32, 49, 34, 51, 36, 35, 52, 53, 54, 39, 56, 63, 48, 33, 50, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2966(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 20, 21, 22, 7, 24, 15, 0, 1, 18, 3, 20, 19, 4, 5, 6, 23, 8, 63, 48, 49, 34, 51, 36, 35, 52, 53, 54, 39, 56, 47, 32, 33, 50, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2967(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 20, 21, 22, 7, 24, 31, 0, 1, 18, 3, 20, 19, 4, 5, 6, 23, 8, 47, 48, 49, 34, 51, 36, 35, 52, 53, 54, 39, 56, 63, 32, 33, 50, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2968(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 20, 21, 22, 7, 24, 15, 16, 17, 2, 3, 20, 19, 4, 5, 6, 23, 8, 63, 32, 33, 50, 51, 36, 35, 52, 53, 54, 39, 56, 47, 48, 49, 34, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2969(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 20, 21, 22, 7, 24, 31, 16, 17, 2, 3, 20, 19, 4, 5, 6, 23, 8, 47, 32, 33, 50, 51, 36, 35, 52, 53, 54, 39, 56, 63, 48, 49, 34, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2970(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 20, 21, 22, 7, 24, 15, 0, 17, 2, 3, 20, 19, 4, 5, 6, 23, 8, 63, 48, 33, 50, 51, 36, 35, 52, 53, 54, 39, 56, 47, 32, 49, 34, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2971(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 20, 21, 22, 7, 24, 31, 0, 17, 2, 3, 20, 19, 4, 5, 6, 23, 8, 47, 48, 33, 50, 51, 36, 35, 52, 53, 54, 39, 56, 63, 32, 49, 34, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2972(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 20, 21, 22, 7, 24, 15, 16, 1, 2, 3, 20, 19, 4, 5, 6, 23, 8, 63, 32, 49, 50, 51, 36, 35, 52, 53, 54, 39, 56, 47, 48, 33, 34, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2973(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 20, 21, 22, 7, 24, 31, 16, 1, 2, 3, 20, 19, 4, 5, 6, 23, 8, 47, 32, 49, 50, 51, 36, 35, 52, 53, 54, 39, 56, 63, 48, 33, 34, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2974(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 20, 21, 22, 7, 24, 15, 0, 1, 2, 3, 20, 19, 4, 5, 6, 23, 8, 63, 48, 49, 50, 51, 36, 35, 52, 53, 54, 39, 56, 47, 32, 33, 34, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2975(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 20, 21, 22, 7, 24, 31, 0, 1, 2, 3, 20, 19, 4, 5, 6, 23, 8, 47, 48, 49, 50, 51, 36, 35, 52, 53, 54, 39, 56, 63, 32, 33, 34, 35, 52, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2976(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 20, 21, 22, 7, 24, 15, 16, 17, 18, 19, 4, 19, 4, 5, 6, 23, 8, 63, 32, 33, 34, 35, 52, 35, 52, 53, 54, 39, 56, 47, 48, 49, 50, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2977(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 20, 21, 22, 7, 24, 31, 16, 17, 18, 19, 4, 19, 4, 5, 6, 23, 8, 47, 32, 33, 34, 35, 52, 35, 52, 53, 54, 39, 56, 63, 48, 49, 50, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2978(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 20, 21, 22, 7, 24, 15, 0, 17, 18, 19, 4, 19, 4, 5, 6, 23, 8, 63, 48, 33, 34, 35, 52, 35, 52, 53, 54, 39, 56, 47, 32, 49, 50, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2979(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 20, 21, 22, 7, 24, 31, 0, 17, 18, 19, 4, 19, 4, 5, 6, 23, 8, 47, 48, 33, 34, 35, 52, 35, 52, 53, 54, 39, 56, 63, 32, 49, 50, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2980(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 20, 21, 22, 7, 24, 15, 16, 1, 18, 19, 4, 19, 4, 5, 6, 23, 8, 63, 32, 49, 34, 35, 52, 35, 52, 53, 54, 39, 56, 47, 48, 33, 50, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2981(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 20, 21, 22, 7, 24, 31, 16, 1, 18, 19, 4, 19, 4, 5, 6, 23, 8, 47, 32, 49, 34, 35, 52, 35, 52, 53, 54, 39, 56, 63, 48, 33, 50, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2982(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 20, 21, 22, 7, 24, 15, 0, 1, 18, 19, 4, 19, 4, 5, 6, 23, 8, 63, 48, 49, 34, 35, 52, 35, 52, 53, 54, 39, 56, 47, 32, 33, 50, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2983(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 20, 21, 22, 7, 24, 31, 0, 1, 18, 19, 4, 19, 4, 5, 6, 23, 8, 47, 48, 49, 34, 35, 52, 35, 52, 53, 54, 39, 56, 63, 32, 33, 50, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2984(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 20, 21, 22, 7, 24, 15, 16, 17, 2, 19, 4, 19, 4, 5, 6, 23, 8, 63, 32, 33, 50, 35, 52, 35, 52, 53, 54, 39, 56, 47, 48, 49, 34, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2985(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 20, 21, 22, 7, 24, 31, 16, 17, 2, 19, 4, 19, 4, 5, 6, 23, 8, 47, 32, 33, 50, 35, 52, 35, 52, 53, 54, 39, 56, 63, 48, 49, 34, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2986(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 20, 21, 22, 7, 24, 15, 0, 17, 2, 19, 4, 19, 4, 5, 6, 23, 8, 63, 48, 33, 50, 35, 52, 35, 52, 53, 54, 39, 56, 47, 32, 49, 34, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2987(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 20, 21, 22, 7, 24, 31, 0, 17, 2, 19, 4, 19, 4, 5, 6, 23, 8, 47, 48, 33, 50, 35, 52, 35, 52, 53, 54, 39, 56, 63, 32, 49, 34, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2988(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 20, 21, 22, 7, 24, 15, 16, 1, 2, 19, 4, 19, 4, 5, 6, 23, 8, 63, 32, 49, 50, 35, 52, 35, 52, 53, 54, 39, 56, 47, 48, 33, 34, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2989(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 20, 21, 22, 7, 24, 31, 16, 1, 2, 19, 4, 19, 4, 5, 6, 23, 8, 47, 32, 49, 50, 35, 52, 35, 52, 53, 54, 39, 56, 63, 48, 33, 34, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2990(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 20, 21, 22, 7, 24, 15, 0, 1, 2, 19, 4, 19, 4, 5, 6, 23, 8, 63, 48, 49, 50, 35, 52, 35, 52, 53, 54, 39, 56, 47, 32, 33, 34, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2991(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 20, 21, 22, 7, 24, 31, 0, 1, 2, 19, 4, 19, 4, 5, 6, 23, 8, 47, 48, 49, 50, 35, 52, 35, 52, 53, 54, 39, 56, 63, 32, 33, 34, 51, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2992(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 20, 21, 22, 7, 24, 15, 16, 17, 18, 3, 4, 19, 4, 5, 6, 23, 8, 63, 32, 33, 34, 51, 52, 35, 52, 53, 54, 39, 56, 47, 48, 49, 50, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2993(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 20, 21, 22, 7, 24, 31, 16, 17, 18, 3, 4, 19, 4, 5, 6, 23, 8, 47, 32, 33, 34, 51, 52, 35, 52, 53, 54, 39, 56, 63, 48, 49, 50, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2994(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 20, 21, 22, 7, 24, 15, 0, 17, 18, 3, 4, 19, 4, 5, 6, 23, 8, 63, 48, 33, 34, 51, 52, 35, 52, 53, 54, 39, 56, 47, 32, 49, 50, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2995(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 20, 21, 22, 7, 24, 31, 0, 17, 18, 3, 4, 19, 4, 5, 6, 23, 8, 47, 48, 33, 34, 51, 52, 35, 52, 53, 54, 39, 56, 63, 32, 49, 50, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2996(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 20, 21, 22, 7, 24, 15, 16, 1, 18, 3, 4, 19, 4, 5, 6, 23, 8, 63, 32, 49, 34, 51, 52, 35, 52, 53, 54, 39, 56, 47, 48, 33, 50, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2997(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 20, 21, 22, 7, 24, 31, 16, 1, 18, 3, 4, 19, 4, 5, 6, 23, 8, 47, 32, 49, 34, 51, 52, 35, 52, 53, 54, 39, 56, 63, 48, 33, 50, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2998(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 20, 21, 22, 7, 24, 15, 0, 1, 18, 3, 4, 19, 4, 5, 6, 23, 8, 63, 48, 49, 34, 51, 52, 35, 52, 53, 54, 39, 56, 47, 32, 33, 50, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt2999(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 20, 21, 22, 7, 24, 31, 0, 1, 18, 3, 4, 19, 4, 5, 6, 23, 8, 47, 48, 49, 34, 51, 52, 35, 52, 53, 54, 39, 56, 63, 32, 33, 50, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3000(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 20, 21, 22, 7, 24, 15, 16, 17, 2, 3, 4, 19, 4, 5, 6, 23, 8, 63, 32, 33, 50, 51, 52, 35, 52, 53, 54, 39, 56, 47, 48, 49, 34, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3001(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 20, 21, 22, 7, 24, 31, 16, 17, 2, 3, 4, 19, 4, 5, 6, 23, 8, 47, 32, 33, 50, 51, 52, 35, 52, 53, 54, 39, 56, 63, 48, 49, 34, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3002(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 20, 21, 22, 7, 24, 15, 0, 17, 2, 3, 4, 19, 4, 5, 6, 23, 8, 63, 48, 33, 50, 51, 52, 35, 52, 53, 54, 39, 56, 47, 32, 49, 34, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3003(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 20, 21, 22, 7, 24, 31, 0, 17, 2, 3, 4, 19, 4, 5, 6, 23, 8, 47, 48, 33, 50, 51, 52, 35, 52, 53, 54, 39, 56, 63, 32, 49, 34, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3004(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 20, 21, 22, 7, 24, 15, 16, 1, 2, 3, 4, 19, 4, 5, 6, 23, 8, 63, 32, 49, 50, 51, 52, 35, 52, 53, 54, 39, 56, 47, 48, 33, 34, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3005(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 20, 21, 22, 7, 24, 31, 16, 1, 2, 3, 4, 19, 4, 5, 6, 23, 8, 47, 32, 49, 50, 51, 52, 35, 52, 53, 54, 39, 56, 63, 48, 33, 34, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3006(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 20, 21, 22, 7, 24, 15, 0, 1, 2, 3, 4, 19, 4, 5, 6, 23, 8, 63, 48, 49, 50, 51, 52, 35, 52, 53, 54, 39, 56, 47, 32, 33, 34, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3007(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 20, 21, 22, 7, 24, 31, 0, 1, 2, 3, 4, 19, 4, 5, 6, 23, 8, 47, 48, 49, 50, 51, 52, 35, 52, 53, 54, 39, 56, 63, 32, 33, 34, 35, 36, 51, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3008(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 20, 21, 22, 7, 24, 15, 16, 17, 18, 19, 20, 3, 4, 5, 6, 23, 8, 63, 32, 33, 34, 35, 36, 51, 52, 53, 54, 39, 56, 47, 48, 49, 50, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3009(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 20, 21, 22, 7, 24, 31, 16, 17, 18, 19, 20, 3, 4, 5, 6, 23, 8, 47, 32, 33, 34, 35, 36, 51, 52, 53, 54, 39, 56, 63, 48, 49, 50, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3010(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 20, 21, 22, 7, 24, 15, 0, 17, 18, 19, 20, 3, 4, 5, 6, 23, 8, 63, 48, 33, 34, 35, 36, 51, 52, 53, 54, 39, 56, 47, 32, 49, 50, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3011(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 20, 21, 22, 7, 24, 31, 0, 17, 18, 19, 20, 3, 4, 5, 6, 23, 8, 47, 48, 33, 34, 35, 36, 51, 52, 53, 54, 39, 56, 63, 32, 49, 50, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3012(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 20, 21, 22, 7, 24, 15, 16, 1, 18, 19, 20, 3, 4, 5, 6, 23, 8, 63, 32, 49, 34, 35, 36, 51, 52, 53, 54, 39, 56, 47, 48, 33, 50, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3013(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 20, 21, 22, 7, 24, 31, 16, 1, 18, 19, 20, 3, 4, 5, 6, 23, 8, 47, 32, 49, 34, 35, 36, 51, 52, 53, 54, 39, 56, 63, 48, 33, 50, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3014(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 20, 21, 22, 7, 24, 15, 0, 1, 18, 19, 20, 3, 4, 5, 6, 23, 8, 63, 48, 49, 34, 35, 36, 51, 52, 53, 54, 39, 56, 47, 32, 33, 50, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3015(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 20, 21, 22, 7, 24, 31, 0, 1, 18, 19, 20, 3, 4, 5, 6, 23, 8, 47, 48, 49, 34, 35, 36, 51, 52, 53, 54, 39, 56, 63, 32, 33, 50, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3016(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 20, 21, 22, 7, 24, 15, 16, 17, 2, 19, 20, 3, 4, 5, 6, 23, 8, 63, 32, 33, 50, 35, 36, 51, 52, 53, 54, 39, 56, 47, 48, 49, 34, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3017(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 20, 21, 22, 7, 24, 31, 16, 17, 2, 19, 20, 3, 4, 5, 6, 23, 8, 47, 32, 33, 50, 35, 36, 51, 52, 53, 54, 39, 56, 63, 48, 49, 34, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3018(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 20, 21, 22, 7, 24, 15, 0, 17, 2, 19, 20, 3, 4, 5, 6, 23, 8, 63, 48, 33, 50, 35, 36, 51, 52, 53, 54, 39, 56, 47, 32, 49, 34, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3019(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 20, 21, 22, 7, 24, 31, 0, 17, 2, 19, 20, 3, 4, 5, 6, 23, 8, 47, 48, 33, 50, 35, 36, 51, 52, 53, 54, 39, 56, 63, 32, 49, 34, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3020(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 20, 21, 22, 7, 24, 15, 16, 1, 2, 19, 20, 3, 4, 5, 6, 23, 8, 63, 32, 49, 50, 35, 36, 51, 52, 53, 54, 39, 56, 47, 48, 33, 34, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3021(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 20, 21, 22, 7, 24, 31, 16, 1, 2, 19, 20, 3, 4, 5, 6, 23, 8, 47, 32, 49, 50, 35, 36, 51, 52, 53, 54, 39, 56, 63, 48, 33, 34, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3022(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 20, 21, 22, 7, 24, 15, 0, 1, 2, 19, 20, 3, 4, 5, 6, 23, 8, 63, 48, 49, 50, 35, 36, 51, 52, 53, 54, 39, 56, 47, 32, 33, 34, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3023(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 20, 21, 22, 7, 24, 31, 0, 1, 2, 19, 20, 3, 4, 5, 6, 23, 8, 47, 48, 49, 50, 35, 36, 51, 52, 53, 54, 39, 56, 63, 32, 33, 34, 51, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3024(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 20, 21, 22, 7, 24, 15, 16, 17, 18, 3, 20, 3, 4, 5, 6, 23, 8, 63, 32, 33, 34, 51, 36, 51, 52, 53, 54, 39, 56, 47, 48, 49, 50, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3025(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 20, 21, 22, 7, 24, 31, 16, 17, 18, 3, 20, 3, 4, 5, 6, 23, 8, 47, 32, 33, 34, 51, 36, 51, 52, 53, 54, 39, 56, 63, 48, 49, 50, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3026(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 20, 21, 22, 7, 24, 15, 0, 17, 18, 3, 20, 3, 4, 5, 6, 23, 8, 63, 48, 33, 34, 51, 36, 51, 52, 53, 54, 39, 56, 47, 32, 49, 50, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3027(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 20, 21, 22, 7, 24, 31, 0, 17, 18, 3, 20, 3, 4, 5, 6, 23, 8, 47, 48, 33, 34, 51, 36, 51, 52, 53, 54, 39, 56, 63, 32, 49, 50, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3028(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 20, 21, 22, 7, 24, 15, 16, 1, 18, 3, 20, 3, 4, 5, 6, 23, 8, 63, 32, 49, 34, 51, 36, 51, 52, 53, 54, 39, 56, 47, 48, 33, 50, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3029(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 20, 21, 22, 7, 24, 31, 16, 1, 18, 3, 20, 3, 4, 5, 6, 23, 8, 47, 32, 49, 34, 51, 36, 51, 52, 53, 54, 39, 56, 63, 48, 33, 50, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3030(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 20, 21, 22, 7, 24, 15, 0, 1, 18, 3, 20, 3, 4, 5, 6, 23, 8, 63, 48, 49, 34, 51, 36, 51, 52, 53, 54, 39, 56, 47, 32, 33, 50, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3031(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 20, 21, 22, 7, 24, 31, 0, 1, 18, 3, 20, 3, 4, 5, 6, 23, 8, 47, 48, 49, 34, 51, 36, 51, 52, 53, 54, 39, 56, 63, 32, 33, 50, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3032(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 20, 21, 22, 7, 24, 15, 16, 17, 2, 3, 20, 3, 4, 5, 6, 23, 8, 63, 32, 33, 50, 51, 36, 51, 52, 53, 54, 39, 56, 47, 48, 49, 34, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3033(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 20, 21, 22, 7, 24, 31, 16, 17, 2, 3, 20, 3, 4, 5, 6, 23, 8, 47, 32, 33, 50, 51, 36, 51, 52, 53, 54, 39, 56, 63, 48, 49, 34, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3034(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 20, 21, 22, 7, 24, 15, 0, 17, 2, 3, 20, 3, 4, 5, 6, 23, 8, 63, 48, 33, 50, 51, 36, 51, 52, 53, 54, 39, 56, 47, 32, 49, 34, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3035(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 20, 21, 22, 7, 24, 31, 0, 17, 2, 3, 20, 3, 4, 5, 6, 23, 8, 47, 48, 33, 50, 51, 36, 51, 52, 53, 54, 39, 56, 63, 32, 49, 34, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3036(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 20, 21, 22, 7, 24, 15, 16, 1, 2, 3, 20, 3, 4, 5, 6, 23, 8, 63, 32, 49, 50, 51, 36, 51, 52, 53, 54, 39, 56, 47, 48, 33, 34, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3037(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 20, 21, 22, 7, 24, 31, 16, 1, 2, 3, 20, 3, 4, 5, 6, 23, 8, 47, 32, 49, 50, 51, 36, 51, 52, 53, 54, 39, 56, 63, 48, 33, 34, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3038(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 20, 21, 22, 7, 24, 15, 0, 1, 2, 3, 20, 3, 4, 5, 6, 23, 8, 63, 48, 49, 50, 51, 36, 51, 52, 53, 54, 39, 56, 47, 32, 33, 34, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3039(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 20, 21, 22, 7, 24, 31, 0, 1, 2, 3, 20, 3, 4, 5, 6, 23, 8, 47, 48, 49, 50, 51, 36, 51, 52, 53, 54, 39, 56, 63, 32, 33, 34, 35, 52, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3040(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 20, 21, 22, 7, 24, 15, 16, 17, 18, 19, 4, 3, 4, 5, 6, 23, 8, 63, 32, 33, 34, 35, 52, 51, 52, 53, 54, 39, 56, 47, 48, 49, 50, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3041(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 20, 21, 22, 7, 24, 31, 16, 17, 18, 19, 4, 3, 4, 5, 6, 23, 8, 47, 32, 33, 34, 35, 52, 51, 52, 53, 54, 39, 56, 63, 48, 49, 50, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3042(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 20, 21, 22, 7, 24, 15, 0, 17, 18, 19, 4, 3, 4, 5, 6, 23, 8, 63, 48, 33, 34, 35, 52, 51, 52, 53, 54, 39, 56, 47, 32, 49, 50, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3043(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 20, 21, 22, 7, 24, 31, 0, 17, 18, 19, 4, 3, 4, 5, 6, 23, 8, 47, 48, 33, 34, 35, 52, 51, 52, 53, 54, 39, 56, 63, 32, 49, 50, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3044(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 20, 21, 22, 7, 24, 15, 16, 1, 18, 19, 4, 3, 4, 5, 6, 23, 8, 63, 32, 49, 34, 35, 52, 51, 52, 53, 54, 39, 56, 47, 48, 33, 50, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3045(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 20, 21, 22, 7, 24, 31, 16, 1, 18, 19, 4, 3, 4, 5, 6, 23, 8, 47, 32, 49, 34, 35, 52, 51, 52, 53, 54, 39, 56, 63, 48, 33, 50, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3046(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 20, 21, 22, 7, 24, 15, 0, 1, 18, 19, 4, 3, 4, 5, 6, 23, 8, 63, 48, 49, 34, 35, 52, 51, 52, 53, 54, 39, 56, 47, 32, 33, 50, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3047(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 20, 21, 22, 7, 24, 31, 0, 1, 18, 19, 4, 3, 4, 5, 6, 23, 8, 47, 48, 49, 34, 35, 52, 51, 52, 53, 54, 39, 56, 63, 32, 33, 50, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3048(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 20, 21, 22, 7, 24, 15, 16, 17, 2, 19, 4, 3, 4, 5, 6, 23, 8, 63, 32, 33, 50, 35, 52, 51, 52, 53, 54, 39, 56, 47, 48, 49, 34, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3049(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 20, 21, 22, 7, 24, 31, 16, 17, 2, 19, 4, 3, 4, 5, 6, 23, 8, 47, 32, 33, 50, 35, 52, 51, 52, 53, 54, 39, 56, 63, 48, 49, 34, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3050(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 20, 21, 22, 7, 24, 15, 0, 17, 2, 19, 4, 3, 4, 5, 6, 23, 8, 63, 48, 33, 50, 35, 52, 51, 52, 53, 54, 39, 56, 47, 32, 49, 34, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3051(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 20, 21, 22, 7, 24, 31, 0, 17, 2, 19, 4, 3, 4, 5, 6, 23, 8, 47, 48, 33, 50, 35, 52, 51, 52, 53, 54, 39, 56, 63, 32, 49, 34, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3052(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 20, 21, 22, 7, 24, 15, 16, 1, 2, 19, 4, 3, 4, 5, 6, 23, 8, 63, 32, 49, 50, 35, 52, 51, 52, 53, 54, 39, 56, 47, 48, 33, 34, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3053(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 20, 21, 22, 7, 24, 31, 16, 1, 2, 19, 4, 3, 4, 5, 6, 23, 8, 47, 32, 49, 50, 35, 52, 51, 52, 53, 54, 39, 56, 63, 48, 33, 34, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3054(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 20, 21, 22, 7, 24, 15, 0, 1, 2, 19, 4, 3, 4, 5, 6, 23, 8, 63, 48, 49, 50, 35, 52, 51, 52, 53, 54, 39, 56, 47, 32, 33, 34, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3055(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 20, 21, 22, 7, 24, 31, 0, 1, 2, 19, 4, 3, 4, 5, 6, 23, 8, 47, 48, 49, 50, 35, 52, 51, 52, 53, 54, 39, 56, 63, 32, 33, 34, 51, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3056(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 20, 21, 22, 7, 24, 15, 16, 17, 18, 3, 4, 3, 4, 5, 6, 23, 8, 63, 32, 33, 34, 51, 52, 51, 52, 53, 54, 39, 56, 47, 48, 49, 50, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3057(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 20, 21, 22, 7, 24, 31, 16, 17, 18, 3, 4, 3, 4, 5, 6, 23, 8, 47, 32, 33, 34, 51, 52, 51, 52, 53, 54, 39, 56, 63, 48, 49, 50, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3058(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 20, 21, 22, 7, 24, 15, 0, 17, 18, 3, 4, 3, 4, 5, 6, 23, 8, 63, 48, 33, 34, 51, 52, 51, 52, 53, 54, 39, 56, 47, 32, 49, 50, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3059(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 20, 21, 22, 7, 24, 31, 0, 17, 18, 3, 4, 3, 4, 5, 6, 23, 8, 47, 48, 33, 34, 51, 52, 51, 52, 53, 54, 39, 56, 63, 32, 49, 50, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3060(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 20, 21, 22, 7, 24, 15, 16, 1, 18, 3, 4, 3, 4, 5, 6, 23, 8, 63, 32, 49, 34, 51, 52, 51, 52, 53, 54, 39, 56, 47, 48, 33, 50, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3061(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 20, 21, 22, 7, 24, 31, 16, 1, 18, 3, 4, 3, 4, 5, 6, 23, 8, 47, 32, 49, 34, 51, 52, 51, 52, 53, 54, 39, 56, 63, 48, 33, 50, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3062(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 20, 21, 22, 7, 24, 15, 0, 1, 18, 3, 4, 3, 4, 5, 6, 23, 8, 63, 48, 49, 34, 51, 52, 51, 52, 53, 54, 39, 56, 47, 32, 33, 50, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3063(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 20, 21, 22, 7, 24, 31, 0, 1, 18, 3, 4, 3, 4, 5, 6, 23, 8, 47, 48, 49, 34, 51, 52, 51, 52, 53, 54, 39, 56, 63, 32, 33, 50, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3064(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 20, 21, 22, 7, 24, 15, 16, 17, 2, 3, 4, 3, 4, 5, 6, 23, 8, 63, 32, 33, 50, 51, 52, 51, 52, 53, 54, 39, 56, 47, 48, 49, 34, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3065(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 20, 21, 22, 7, 24, 31, 16, 17, 2, 3, 4, 3, 4, 5, 6, 23, 8, 47, 32, 33, 50, 51, 52, 51, 52, 53, 54, 39, 56, 63, 48, 49, 34, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3066(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 20, 21, 22, 7, 24, 15, 0, 17, 2, 3, 4, 3, 4, 5, 6, 23, 8, 63, 48, 33, 50, 51, 52, 51, 52, 53, 54, 39, 56, 47, 32, 49, 34, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3067(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 20, 21, 22, 7, 24, 31, 0, 17, 2, 3, 4, 3, 4, 5, 6, 23, 8, 47, 48, 33, 50, 51, 52, 51, 52, 53, 54, 39, 56, 63, 32, 49, 34, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3068(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 20, 21, 22, 7, 24, 15, 16, 1, 2, 3, 4, 3, 4, 5, 6, 23, 8, 63, 32, 49, 50, 51, 52, 51, 52, 53, 54, 39, 56, 47, 48, 33, 34, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3069(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 20, 21, 22, 7, 24, 31, 16, 1, 2, 3, 4, 3, 4, 5, 6, 23, 8, 47, 32, 49, 50, 51, 52, 51, 52, 53, 54, 39, 56, 63, 48, 33, 34, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3070(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 20, 21, 22, 7, 24, 15, 0, 1, 2, 3, 4, 3, 4, 5, 6, 23, 8, 63, 48, 49, 50, 51, 52, 51, 52, 53, 54, 39, 56, 47, 32, 33, 34, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
void des_25_encrypt_salt3071(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys)
{
des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 20, 21, 22, 7, 24, 31, 0, 1, 2, 3, 4, 3, 4, 5, 6, 23, 8, 47, 48, 49, 50, 51, 52, 51, 52, 53, 54, 39, 56, 63, 32, 33, 34, 35, 36, 35, 36, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}
#endif // DESGPU_COMPILE_ALL_SALTS
|
ead2449216edd0187bcc00f4749b87164a199376.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
//perform vector addition utilizing blocks and threads
__global__ void add(int *a, int *b, int *c, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) //avoid accessing beyond end of array
c[index] = a[index] + b[index];
}
//populate vectors with random ints
void random_ints(int* a, int N) {
for (int i=0; i < N; i++){
a[i] = rand() % 1000;
}
}
#define N (2048*2048) // overall size of the data set
#define THREADS_PER_BLOCK 512 // threads per block
int main(void) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
//alloc space for device copies of a, b, and c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
//alloc space for host copies and setup input values
a = (int *)malloc(size); random_ints(a, N);
b = (int *)malloc(size); random_ints(b, N);
c = (int *)malloc(size);
//copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
//launch add() kernel; the grid size rounds N/THREADS_PER_BLOCK up so every element is covered
hipLaunchKernelGGL(( add), dim3((N + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c, N);
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
//clean up
free(a); free(b); free(c);
hipFree(d_a); hipFree(d_b); hipFree(d_c);
return 0;
} | ead2449216edd0187bcc00f4749b87164a199376.cu | #include <stdio.h>
#include <iostream>
//perform vector addition utilizing blocks and threads
__global__ void add(int *a, int *b, int *c, int n) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
if (index < n) //avoid accessing beyond end of array
c[index] = a[index] + b[index];
}
//populate vectors with random ints
void random_ints(int* a, int N) {
for (int i=0; i < N; i++){
a[i] = rand() % 1000;
}
}
#define N (2048*2048) // overall size of the data set
#define THREADS_PER_BLOCK 512 // threads per block
int main(void) {
int *a, *b, *c;
int *d_a, *d_b, *d_c;
int size = N * sizeof(int);
//alloc space for device copies of a, b, and c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
//alloc space for host copies and setup input values
a = (int *)malloc(size); random_ints(a, N);
b = (int *)malloc(size); random_ints(b, N);
c = (int *)malloc(size);
//copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
//launch add() kernel; the grid size rounds N/THREADS_PER_BLOCK up so every element is covered
add<<<(N + THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
//clean up
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
} |
bce8ae48dfc1827d83b57f5e02cf67167e38e0b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// Number of elements to put in the test array
#define TEST_SIZE 16
#define NUM_BINS 10
////////////////////////////////////////////////////////////////
////////////////// COPY EVERYTHING BELOW HERE //////////////////
////////////////////////////////////////////////////////////////
// Number of threads per block (1-d blocks)
#define BLOCK_WIDTH 4
// Functions to reduce with
#define ADD 0
#define MIN 1
#define MAX 2
// Device functions
__global__ void scanKernel(unsigned int* d_cdf, unsigned int* d_input, const size_t array_size)
{
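// Work-efficient (Blelloch) exclusive scan over the 2*BLOCK_WIDTH elements
// owned by this block; for inputs spanning several blocks the per-block
// results still need a second pass to add in the preceding block sums.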
__shared__ unsigned int temp[BLOCK_WIDTH<<1];
int bx = blockIdx.x;
int tx = threadIdx.x;
int index = BLOCK_WIDTH * bx + tx;
int offset = 1;
if(2*index + 1 < array_size) {
// Stage this block's slice with the *local* thread index: temp holds only
// 2*BLOCK_WIDTH entries, so global indexing would overrun shared memory
// for every block after the first.
temp[2*tx] = d_input[2*index];
temp[2*tx + 1] = d_input[2*index + 1];
}
// Up-sweep
for(int powOf2 = (2*BLOCK_WIDTH)>>1; powOf2 > 0; powOf2 >>= 1) {
__syncthreads();
if(tx < powOf2) {
// temp is per-block shared memory, so no 2*BLOCK_WIDTH*bx offset is needed
int idx1 = offset*(2*tx + 1) - 1;
int idx2 = offset*(2*tx + 2) - 1;
temp[idx2] += temp[idx1];
}
offset <<= 1;
}
__syncthreads();
///// The below will need to be remembered for multiple blocks /////
if(tx == 0) {
temp[2*BLOCK_WIDTH - 1] = 0;
}
// Down-sweep
for(int powOf2 = 1; powOf2 < 2*BLOCK_WIDTH; powOf2 <<= 1) {
offset >>= 1;
__syncthreads();
if(tx < powOf2) {
int idx1 = offset*(2*tx + 1) - 1;
int idx2 = offset*(2*tx + 2) - 1;
unsigned int t = temp[idx1];
temp[idx1] = temp[idx2];
temp[idx2] += t;
}
}
__syncthreads();
if(2*index + 1 < array_size) {
d_cdf[2*index] = temp[2*tx];
d_cdf[2*index + 1] = temp[2*tx + 1];
}
} | bce8ae48dfc1827d83b57f5e02cf67167e38e0b5.cu | #include "includes.h"
// Number of elements to put in the test array
#define TEST_SIZE 16
#define NUM_BINS 10
////////////////////////////////////////////////////////////////
////////////////// COPY EVERYTHING BELOW HERE //////////////////
////////////////////////////////////////////////////////////////
// Number of threads per block (1-d blocks)
#define BLOCK_WIDTH 4
// Functions to reduce with
#define ADD 0
#define MIN 1
#define MAX 2
// Device functions
__global__ void scanKernel(unsigned int* d_cdf, unsigned int* d_input, const size_t array_size)
{
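// Work-efficient (Blelloch) exclusive scan over the 2*BLOCK_WIDTH elements
// owned by this block; for inputs spanning several blocks the per-block
// results still need a second pass to add in the preceding block sums.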
__shared__ unsigned int temp[BLOCK_WIDTH<<1];
int bx = blockIdx.x;
int tx = threadIdx.x;
int index = BLOCK_WIDTH * bx + tx;
int offset = 1;
if(2*index + 1 < array_size) {
// Stage this block's slice with the *local* thread index: temp holds only
// 2*BLOCK_WIDTH entries, so global indexing would overrun shared memory
// for every block after the first.
temp[2*tx] = d_input[2*index];
temp[2*tx + 1] = d_input[2*index + 1];
}
// Up-sweep
for(int powOf2 = (2*BLOCK_WIDTH)>>1; powOf2 > 0; powOf2 >>= 1) {
__syncthreads();
if(tx < powOf2) {
// temp is per-block shared memory, so no 2*BLOCK_WIDTH*bx offset is needed
int idx1 = offset*(2*tx + 1) - 1;
int idx2 = offset*(2*tx + 2) - 1;
temp[idx2] += temp[idx1];
}
offset <<= 1;
}
__syncthreads();
///// The below will need to be remembered for multiple blocks /////
if(tx == 0) {
temp[2*BLOCK_WIDTH - 1] = 0;
}
// Down-sweep
for(int powOf2 = 1; powOf2 < 2*BLOCK_WIDTH; powOf2 <<= 1) {
offset >>= 1;
__syncthreads();
if(tx < powOf2) {
int idx1 = offset*(2*tx + 1) - 1;
int idx2 = offset*(2*tx + 2) - 1;
unsigned int t = temp[idx1];
temp[idx1] = temp[idx2];
temp[idx2] += t;
}
}
__syncthreads();
if(2*index + 1 < array_size) {
d_cdf[2*index] = temp[2*tx];
d_cdf[2*index + 1] = temp[2*tx + 1];
}
} |
524a26d6ce261364f46451b72c355abaa55139d6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f*f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| 524a26d6ce261364f46451b72c355abaa55139d6.cu | #include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f*f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
1c0f73d0a8024538bb91db0915a57b5a3e94db14.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template<typename Dtype>
void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
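// Forward is copy-free: every top blob simply aliases bottom[0]'s data buffer.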
for (int i = 0; i < top.size(); ++i) {
top[i]->ShareData(*bottom[0]);
}
}
template<typename Dtype>
void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
if (top.size() == 1) {
caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff());
return;
}
caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(),
bottom[0]->mutable_gpu_diff());
// Add remaining top blob diffs.
for (int i = 2; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
}
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
if (top.size() == 1) {
greentea_copy<Dtype>(count_, (cl_mem) (top[0]->gpu_diff()), 0,
(cl_mem) (bottom[0]->mutable_gpu_diff()), 0, &ctx);
return;
}
greentea_gpu_add<Dtype>(this->device_context_->id(), count_,
(cl_mem) (top[0]->gpu_diff()), 0,
(cl_mem) (top[1]->gpu_diff()), 0,
(cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
// Add remaining top blob diffs.
for (int i = 2; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
greentea_gpu_axpy<Dtype>(this->device_context_->id(), count_, Dtype(1.),
(cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0);
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer);
} // namespace caffe
| 1c0f73d0a8024538bb91db0915a57b5a3e94db14.cu | #include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template<typename Dtype>
void SplitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
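// Forward is copy-free: every top blob simply aliases bottom[0]'s data buffer.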
for (int i = 0; i < top.size(); ++i) {
top[i]->ShareData(*bottom[0]);
}
}
template<typename Dtype>
void SplitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
if (this->device_context_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
if (top.size() == 1) {
caffe_copy(count_, top[0]->gpu_diff(), bottom[0]->mutable_gpu_diff());
return;
}
caffe_gpu_add(count_, top[0]->gpu_diff(), top[1]->gpu_diff(),
bottom[0]->mutable_gpu_diff());
// Add remaining top blob diffs.
for (int i = 2; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
caffe_gpu_axpy(count_, Dtype(1.), top_diff, bottom_diff);
}
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_context_->id());
if (top.size() == 1) {
greentea_copy<Dtype>(count_, (cl_mem) (top[0]->gpu_diff()), 0,
(cl_mem) (bottom[0]->mutable_gpu_diff()), 0, &ctx);
return;
}
greentea_gpu_add<Dtype>(this->device_context_->id(), count_,
(cl_mem) (top[0]->gpu_diff()), 0,
(cl_mem) (top[1]->gpu_diff()), 0,
(cl_mem) (bottom[0]->mutable_gpu_diff()), 0);
// Add remaining top blob diffs.
for (int i = 2; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
greentea_gpu_axpy<Dtype>(this->device_context_->id(), count_, Dtype(1.),
(cl_mem) top_diff, 0, (cl_mem) bottom_diff, 0);
}
#endif // USE_GREENTEA
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SplitLayer);
} // namespace caffe
|
2d9ceb80ebe5842dd87a6440e6ecfce4a520b5d3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/driver_types.h>
#include "../common/book.h"
#define CLOCKS_PAR_SEC 1000000l
#define NB_THREAD 192
#define NB_ITER 10
#define MATRIX_WIDTH 1024
#define MATRIX_HEIGHT (NB_THREAD * NB_ITER)
#define VECTOR_LENGTH MATRIX_WIDTH
__constant__ float const_x[VECTOR_LENGTH];
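// Keep the input vector in constant memory: all threads read the same x[i]
// each iteration, so the broadcast is served from the constant cache.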
// Slightly faster variant
__global__ void matVec_V1(float *A, float *b)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
float tmp = 0;
for (int i=x; i<MATRIX_WIDTH * MATRIX_HEIGHT; i+=MATRIX_WIDTH)
{
tmp += A[i] * const_x[i/MATRIX_HEIGHT];
}
b[x] = tmp;
}
// Slightly slower variant
__global__ void matVec_V2(float *A, float *b)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
float tmp = 0;
int offset = x * MATRIX_WIDTH;
for (int i=0; i<MATRIX_WIDTH; i++)
{
tmp += A[i+offset] * const_x[i];
}
b[x] = tmp;
}
// Variant used with streams (slow); note that the A[] read below is commented
// out, so this kernel actually computes 2*sum(x) rather than the true product.
__global__ void matVec(float *A, float *b)
{
register int x = threadIdx.x + blockIdx.x * blockDim.x;
register float tmp = 0;
// register int offset = x*MATRIX_WIDTH;
#pragma unroll
for (int i=0; i<MATRIX_WIDTH; i++)
{
tmp += 2.0 * const_x[i]; // A[i+offset]
}
b[x] = tmp;
}
// Initialize a vector to zero
__global__ void init_0(float *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
x[idx] = 0;
}
/************************************************************************/
/* Main */
/************************************************************************/
int main(int argc, char* argv[])
{
hipStream_t stream0, stream1;
hipStreamCreate(&stream0);
hipStreamCreate(&stream1);
float *host_A, *host_x, *host_b;
float *dev_A0, *dev_b;
float *dev_A1;
// allocate the memory on the Host (CPU)
HANDLE_ERROR( hipHostMalloc((void**)&host_A,
MATRIX_WIDTH * MATRIX_HEIGHT * sizeof(float),
hipHostMallocDefault ));
HANDLE_ERROR( hipHostMalloc((void**)&host_x,
VECTOR_LENGTH * sizeof(float),
hipHostMallocDefault ));
HANDLE_ERROR( hipHostMalloc((void**)&host_b,
VECTOR_LENGTH * sizeof(float),
hipHostMallocDefault ));
// allocate the memory on the GPU
HANDLE_ERROR( hipMalloc( (void**)&dev_A0,
MATRIX_WIDTH * NB_THREAD * sizeof(float) ));
HANDLE_ERROR( hipMalloc( (void**)&dev_b,
VECTOR_LENGTH * sizeof(float) ));
HANDLE_ERROR( hipMalloc( (void**)&dev_A1,
MATRIX_WIDTH * NB_THREAD * sizeof(float) ));
// init data
for (int i = 0; i < MATRIX_WIDTH * MATRIX_HEIGHT; ++i)
{
host_A[i] = (float)(i/MATRIX_WIDTH);
}
for (int i = 0; i < VECTOR_LENGTH; ++i )
{
host_x[i] = 2.0;
}
/* measure the execution time */
hipEvent_t start, stop;
float tempsGPU;
HANDLE_ERROR( hipEventCreate(&start) );
HANDLE_ERROR( hipEventCreate(&stop) );
HANDLE_ERROR( hipEventRecord(start, stream0) );
/* copy the data to the GPU */
HANDLE_ERROR( hipMemcpyToSymbol( const_x, host_x, VECTOR_LENGTH * sizeof(float), 0,
hipMemcpyHostToDevice) );
/* initialize the data on the GPU */
hipLaunchKernelGGL(( init_0), dim3(1), dim3(VECTOR_LENGTH), 0, 0, dev_b);
dim3 ThreadPerBlock ( NB_THREAD , 1 );
dim3 BlockPerGrid ( 1 , 1 );
int offset = NB_THREAD * MATRIX_WIDTH;
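/* Alternate the two streams: even row-chunks are copied and processed on
   stream0 while odd chunks run on stream1, overlapping host-to-device
   transfers with kernel execution. */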
for (int i=0 ; i < NB_ITER ; i+=2)
{
HANDLE_ERROR( hipMemcpyAsync(dev_A0, host_A + i*offset,
MATRIX_WIDTH * NB_THREAD * sizeof(float), hipMemcpyHostToDevice, stream0) );
HANDLE_ERROR( hipMemcpyAsync(dev_A1, host_A + (i+1)*offset,
MATRIX_WIDTH * NB_THREAD * sizeof(float), hipMemcpyHostToDevice, stream1) );
hipLaunchKernelGGL(( matVec), dim3(BlockPerGrid), dim3(ThreadPerBlock), 0, stream0, dev_A0, dev_b + i*NB_THREAD);
hipLaunchKernelGGL(( matVec), dim3(BlockPerGrid), dim3(ThreadPerBlock), 0, stream1, dev_A1, dev_b + (i+1)*NB_THREAD);
}
HANDLE_ERROR( hipStreamSynchronize(stream0));
HANDLE_ERROR( hipStreamSynchronize(stream1));
HANDLE_ERROR( hipMemcpy( host_b, dev_b, VECTOR_LENGTH * sizeof(float), hipMemcpyDeviceToHost));
/* end of the execution-time measurement */
hipEventRecord(stop, stream0);
hipEventSynchronize( stop );
hipEventElapsedTime(&tempsGPU, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree( dev_A0 );
hipFree( dev_A1 );
hipFree( dev_b );
hipStreamDestroy(stream0);
hipStreamDestroy(stream1);
/* check the results */
printf("Resultats calcul GPU : \n");
for (int i=0; i < VECTOR_LENGTH/NB_THREAD; i++)
{
printf("b[%3d] = %5.1f \n", i*NB_THREAD, host_b[i*NB_THREAD] );
}
/* print the execution time */
printf("temps coule sur GPU : %f ms \n\n", tempsGPU);
/**********************************************
run the same operation on the CPU
**********************************************/
int k=1;
clock_t t1, t2;
double tempsCPU;
t1 = clock();
/* run the operation on the CPU */
for (k=0; k<50; k++)
{
for (int i=0; i<MATRIX_HEIGHT; i++)
{
host_b[i] = 0;
for (int j=0; j<MATRIX_WIDTH; j++)
{
host_b[i] += host_A[i*MATRIX_WIDTH + j] * host_x[j];
}
}
}
for (int i=0; i < VECTOR_LENGTH/NB_THREAD; i++)
{
printf("b[%3d] = %5.1f \n", i*NB_THREAD, host_b[i*NB_THREAD] );
}
t2 = clock();
tempsCPU = (double)difftime(t2, t1)/(double)CLOCKS_PAR_SEC;
/* affichage du temps d'execution */
tempsCPU = tempsCPU * 1000.0 / k;
printf("temps coule sur CPU: %f ms \n\n", tempsCPU);
printf("Speedup : %3.2fx \n", tempsCPU / tempsGPU);
hipHostFree(host_A);
hipHostFree(host_x);
hipHostFree(host_b);
return EXIT_SUCCESS;
}
| 2d9ceb80ebe5842dd87a6440e6ecfce4a520b5d3.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <driver_types.h>
#include "../common/book.h"
#define CLOCKS_PAR_SEC 1000000l
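// locally defined tick rate (same value as the POSIX CLOCKS_PER_SEC); note the French "_PAR_" spelling used throughout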
#define NB_THREAD 192
#define NB_ITER 10
#define MATRIX_WIDTH 1024
#define MATRIX_HEIGHT (NB_THREAD * NB_ITER)
#define VECTOR_LENGTH MATRIX_WIDTH
__constant__ float const_x[VECTOR_LENGTH];
// Slightly faster version
__global__ void matVec_V1(float *A, float *b)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
float tmp = 0;
for (int i=x; i<MATRIX_WIDTH * MATRIX_HEIGHT; i+=MATRIX_WIDTH)
{
tmp += A[i] * const_x[i/MATRIX_HEIGHT];
}
b[x] = tmp;
}
// Slightly slower version
__global__ void matVec_V2(float *A, float *b)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
float tmp = 0;
int offset = x * MATRIX_WIDTH;
for (int i=0; i<MATRIX_WIDTH; i++)
{
tmp += A[i+offset] * const_x[i];
}
b[x] = tmp;
}
// Stream-based version (slow)
__global__ void matVec(float *A, float *b)
{
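// Note: this benchmark variant never reads A; b[x] becomes
// 2.0 * (sum of const_x), identical for every thread.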
register int x = threadIdx.x + blockIdx.x * blockDim.x;
register float tmp = 0;
// register int offset = x*MATRIX_WIDTH;
#pragma unroll
for (int i=0; i<MATRIX_WIDTH; i++)
{
tmp += 2.0 * const_x[i]; // A[i+offset]
}
b[x] = tmp;
}
// Initialize a vector to zero
__global__ void init_0(float *x)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
x[idx] = 0;
}
/************************************************************************/
/* Main */
/************************************************************************/
int main(int argc, char* argv[])
{
cudaStream_t stream0, stream1;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
float *host_A, *host_x, *host_b;
float *dev_A0, *dev_b;
float *dev_A1;
// allocate the memory on the Host (CPU)
HANDLE_ERROR( cudaHostAlloc((void**)&host_A,
MATRIX_WIDTH * MATRIX_HEIGHT * sizeof(float),
cudaHostAllocDefault ));
HANDLE_ERROR( cudaHostAlloc((void**)&host_x,
VECTOR_LENGTH * sizeof(float),
cudaHostAllocDefault ));
HANDLE_ERROR( cudaHostAlloc((void**)&host_b,
MATRIX_HEIGHT * sizeof(float), /* b has one entry per matrix row (MATRIX_HEIGHT > VECTOR_LENGTH) */
cudaHostAllocDefault ));
// allocate the memory on the GPU
HANDLE_ERROR( cudaMalloc( (void**)&dev_A0,
MATRIX_WIDTH * NB_THREAD * sizeof(float) ));
HANDLE_ERROR( cudaMalloc( (void**)&dev_b,
MATRIX_HEIGHT * sizeof(float) ));
HANDLE_ERROR( cudaMalloc( (void**)&dev_A1,
MATRIX_WIDTH * NB_THREAD * sizeof(float) ));
// init data
for (int i = 0; i < MATRIX_WIDTH * MATRIX_HEIGHT; ++i)
{
host_A[i] = (float)(i/MATRIX_WIDTH);
}
for (int i = 0; i < VECTOR_LENGTH; ++i )
{
host_x[i] = 2.0;
}
/* mesure du temps d'execution */
cudaEvent_t start, stop;
float tempsGPU;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
HANDLE_ERROR( cudaEventRecord(start, stream0) );
/* Copy the data to the GPU */
HANDLE_ERROR( cudaMemcpyToSymbol( const_x, host_x, VECTOR_LENGTH * sizeof(float), 0,
cudaMemcpyHostToDevice) );
/* Initialize the data on the GPU */
init_0<<<NB_ITER, NB_THREAD>>>(dev_b); /* cover all MATRIX_HEIGHT entries of b */
dim3 ThreadPerBlock ( NB_THREAD , 1 );
dim3 BlockPerGrid ( 1 , 1 );
int offset = NB_THREAD * MATRIX_WIDTH;
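// Process the matrix in chunks of NB_THREAD rows, alternating between the two
// streams so that the async copy of one chunk overlaps the kernel of the other.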
for (int i=0 ; i < NB_ITER ; i+=2)
{
HANDLE_ERROR( cudaMemcpyAsync(dev_A0, host_A + i*offset,
MATRIX_WIDTH * NB_THREAD * sizeof(float), cudaMemcpyHostToDevice, stream0) );
HANDLE_ERROR( cudaMemcpyAsync(dev_A1, host_A + (i+1)*offset,
MATRIX_WIDTH * NB_THREAD * sizeof(float), cudaMemcpyHostToDevice, stream1) );
matVec<<<BlockPerGrid, ThreadPerBlock, 0, stream0>>>(dev_A0, dev_b + i*NB_THREAD);
matVec<<<BlockPerGrid, ThreadPerBlock, 0, stream1>>>(dev_A1, dev_b + (i+1)*NB_THREAD);
}
HANDLE_ERROR( cudaStreamSynchronize(stream0));
HANDLE_ERROR( cudaStreamSynchronize(stream1));
HANDLE_ERROR( cudaMemcpy( host_b, dev_b, MATRIX_HEIGHT * sizeof(float), cudaMemcpyDeviceToHost));
/* End of the execution-time measurement */
cudaEventRecord(stop, stream0);
cudaEventSynchronize( stop );
cudaEventElapsedTime(&tempsGPU, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree( dev_A0 );
cudaFree( dev_A1 );
cudaFree( dev_b );
cudaStreamDestroy(stream0);
cudaStreamDestroy(stream1);
/* verify the results */
printf("Resultats calcul GPU : \n");
for (int i=0; i < VECTOR_LENGTH/NB_THREAD; i++)
{
printf("b[%3d] = %5.1f \n", i*NB_THREAD, host_b[i*NB_THREAD] );
}
/* display the execution time */
printf("elapsed time on GPU: %f ms \n\n", tempsGPU);
/**********************************************
run the same operation on the CPU
**********************************************/
int k=1;
clock_t t1, t2;
double tempsCPU;
t1 = clock();
/* run the operation on the CPU */
for (k=0; k<50; k++)
{
for (int i=0; i<MATRIX_HEIGHT; i++)
{
host_b[i] = 0;
for (int j=0; j<MATRIX_WIDTH; j++)
{
host_b[i] += host_A[i*MATRIX_WIDTH + j] * host_x[j];
}
}
}
t2 = clock();
/* stop the clock before printing so that console I/O is not counted in the CPU time */
for (int i=0; i < VECTOR_LENGTH/NB_THREAD; i++)
{
printf("b[%3d] = %5.1f \n", i*NB_THREAD, host_b[i*NB_THREAD] );
}
tempsCPU = (double)(t2 - t1)/(double)CLOCKS_PAR_SEC; /* difftime() expects time_t, not clock_t; plain subtraction is the portable form */
/* display the execution time */
tempsCPU = tempsCPU * 1000.0 / k;
printf("temps écoule sur CPU: %f ms \n\n", tempsCPU);
printf("Speedup : %3.2fx \n", tempsCPU / tempsGPU);
cudaFreeHost(host_A);
cudaFreeHost(host_x);
cudaFreeHost(host_b);
return EXIT_SUCCESS;
}
|
d5d64b8f1c4dfe795fdbb6ac084cb94f391f774c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
mainGround.cu
Author: Brian Ichter
This runs the GMT* and MCMP algorithms for a double integrator system (representing a quadrotor model). This main file
is used primarily for timing results and evaluations of solution quality, rather than to be run directly with the quad
(we use mainQuad.cu for this).
Run instructions:
TODO
*/
#include <iostream>
#include <fstream>
#include <ctime>
#include <cstdlib>
#include <algorithm>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <limits>
#include <sstream>
#include "motionPlanningProblem.cuh"
#include "motionPlanningSolution.cuh"
#include "obstacles.cuh"
#include "helper.cuh"
#include "sampler.cuh"
#include "2pBVP.cuh"
#include "GMT.cuh"
#include "PRM.cuh"
#include "FMT.cuh"
#include "dubinsAirplane.cuh"
// compiler inputs
#ifndef DIM
#error Please define DIM.
#endif
#ifndef NUM
#error Please define NUM.
#endif
// horrible coding, but will get the job done for now
int DIMdubMain = 4;
// ***************** offline settings (paste setup here)
float lo[DIM] = {0, 0, 0, 0};
float hi[DIM] = {5, 5, 5, 2*M_PI};
int edgeDiscNum = 8;
float dt = 0.05; // time step for dynamic propagation
int numDisc = 4; // number of discretizations of kinodynamic paths
int numControls = 3; // number of total controls (i.e., Dubins word length)
float ms = 1000;
bool verbose = true;
int main(int argc, const char* argv[]) {
float x0dub[4] = {0, 0, 0, M_PI/2};
float x1dub[4] = {1, -3, 1, 0};
std::vector<int> control(numControls);
std::vector<float> controlD(numControls);
std::vector<float> path(DIMdubMain*numDisc*numControls);
float cmin = dubinsAirplaneCost(x0dub, x1dub, control.data(), controlD.data());
std::cout << "cost = " << cmin << ", word is " << control[0] << ", " << control[1] << ", " << control[2] << std::endl;
std::cout << "durations (" << controlD[0] << ", " << controlD[1] << ", " << controlD[2] << ")" << std::endl;
dubinsAirplanePath(x0dub, x1dub, control.data(), controlD.data(), path.data(), numDisc);
printArray(path.data(), numControls*numDisc, DIMdubMain, std::cout);
std::cout << "*********** Beginning Dubins Aircraft Run (DIM = " << DIM << ", NUM = " << NUM << ") **********" << std::endl;
// check setup is 3D DI
if (DIM != 4) {
std::cout << "DIM must be 4 for Dubins airplane (x y z theta)" << std::endl;
return -1;
}
// check that a problem file has been specified
if (argc != 2) {
std::cout << "Must specify an problem setup filename, i.e. $ ./dubins file.txt" << std::endl;
return -1;
}
int count = 0;
hipGetDeviceCount(&count);
hipError_t code;
int deviceNum = 1;
hipSetDevice(deviceNum);
std::cout << "Number of CUDA devices = " << count << ", selecting " << deviceNum << std::endl;
code = hipPeekAtLastError();
if (hipSuccess != code) { std::cout << "ERROR on selecting device: " << hipGetErrorString(code) << std::endl; }
MotionPlanningProblem mpp;
mpp.filename = argv[1];
mpp.dimC = DIM;
mpp.dimW = 3;
mpp.numSamples = NUM;
mpp.edgeDiscNum = edgeDiscNum;
mpp.dt = dt;
mpp.hi.resize(mpp.dimC);
mpp.lo.resize(mpp.dimC);
for (int i = 0; i < DIM; ++i) {
mpp.hi[i] = hi[i];
mpp.lo[i] = lo[i];
}
// init and goal states (must then connect to the final tree)
std::vector<float> init(DIM, 0.1);
std::vector<float> goal(DIM, 4.9);
init[3] = M_PI/2;
goal[3] = M_PI/2;
int goalIdx = NUM-1;
int initIdx = 0;
int numObstacles = getObstaclesCount();
std::vector<float> obstacles(numObstacles*2*DIM);
generateObstacles(obstacles.data(), numObstacles*2*DIM);
std::ifstream file (mpp.filename);
std::string readValue;
if (file.good()) {
// read in init
for (int d = 0; d < DIM; ++d) {
getline(file, readValue, ',');
std::stringstream convertorInit(readValue);
convertorInit >> init[d];
}
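// read in goal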
for (int d = 0; d < DIM; ++d) {
getline(file, readValue, ',');
std::stringstream convertorGoal(readValue);
convertorGoal >> goal[d];
}
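// read in lower state-space bounds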
for (int d = 0; d < DIM; ++d) {
getline(file, readValue, ',');
std::stringstream convertorLo(readValue);
convertorLo >> lo[d];
}
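// read in upper state-space bounds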
for (int d = 0; d < DIM; ++d) {
getline(file, readValue, ',');
std::stringstream convertorHi(readValue);
convertorHi >> hi[d];
}
getline(file, readValue, ',');
std::stringstream convertorNumObs(readValue);
convertorNumObs >> numObstacles;
obstacles.resize(numObstacles*DIM*2);
for (int obs = 0; obs < numObstacles; ++obs) {
for (int d = 0; d < DIM*2; ++d) {
getline(file, readValue, ',');
std::stringstream convertorObs(readValue);
convertorObs >> obstacles[obs*DIM*2 + d];
}
}
if (verbose) {
std::cout << "***** Inputs from " << mpp.filename << " are: *****" << std::endl;
}
if (verbose) {
std::cout << "init is: "; printArray(&init[0],1,DIM,std::cout);
}
if (verbose) {
std::cout << "goal is: "; printArray(&goal[0],1,DIM,std::cout);
}
if (verbose) {
std::cout << "lo is: "; printArray(&lo[0],1,DIM,std::cout);
}
if (verbose) {
std::cout << "hi is: "; printArray(&hi[0],1,DIM,std::cout);
}
if (verbose) {
std::cout << "obstacle count = " << numObstacles << std::endl;
}
if(verbose) {
printArray(&obstacles[0],numObstacles,2*DIM,std::cout);
}
} else {
std::cout << "didn't read in file, bad file!" << std::endl;
}
std::cout << "--- Motion planning problem, " << mpp.filename << " ---" << std::endl;
std::cout << "Sample count = " << mpp.numSamples << ", C-space dim = " << mpp.dimC << ", Workspace dim = " << mpp.dimW << std::endl;
std::cout << "hi = ["; for (int i = 0; i < mpp.dimC; ++i) { std::cout << hi[i] << " "; } std::cout << "], ";
std::cout << "lo = ["; for (int i = 0; i < mpp.dimC; ++i) { std::cout << lo[i] << " "; } std::cout << "]" << std::endl;
std::cout << "edge discretizations " << mpp.edgeDiscNum << ", dt = " << mpp.dt << std::endl;
/*********************** create array to return debugging information ***********************/
float *d_debugOutput;
hipMalloc(&d_debugOutput, sizeof(float)*NUM);
// ***************** setup data structures and struct
// ***************** precomputation
std::vector<float> samplesAll (DIM*NUM);
createSamplesHalton(0, samplesAll.data(), &(init[0]), &(goal[0]), lo, hi);
thrust::device_vector<float> d_samples_thrust(DIM*NUM);
float *d_samples = thrust::raw_pointer_cast(d_samples_thrust.data());
CUDA_ERROR_CHECK(hipMemcpy(d_samples, samplesAll.data(), sizeof(float)*DIM*NUM, hipMemcpyHostToDevice));
// calculate nn
int nullControl[3]; // throw away values for computing the cost of each connection
float nullControlD[3];
double t_calc10thStart = std::clock();
std::vector<float> topts ((NUM-1)*(NUM));
std::vector<float> copts ((NUM-1)*(NUM));
float percentile = 0.05;
int rnIdx = (NUM-1)*(NUM)*percentile; // index of the percentile cutoff in the sorted costs; also the number of NN edges kept
int numEdges = rnIdx;
std::cout << "rnIdx is " << rnIdx << std::endl;
std::cout << "This is a bit slow as it is currently implemented on CPU, can be sped up significantly with a simple GPU implementation" << std::endl;
int idx = 0;
std::vector<float> c2g (NUM);
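// c2g[i]: obstacle-free Dubins cost from sample i to the goal, used as a heuristic by PRM below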
for (int i = 0; i < NUM; ++i) {
for (int j = 0; j < NUM; ++j) {
if (j == i)
continue;
topts[idx] = dubinsAirplaneCost(&(samplesAll[i*DIM]), &(samplesAll[j*DIM]), nullControl, nullControlD);
copts[idx] = dubinsAirplaneCost(&(samplesAll[i*DIM]), &(samplesAll[j*DIM]), nullControl, nullControlD);
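// note: topts and copts are filled from the identical call; time and cost
// presumably coincide for this Dubins airplane model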
idx++;
}
c2g[i] = dubinsAirplaneCost(&(samplesAll[i*DIM]), &(samplesAll[goalIdx*DIM]), nullControl, nullControlD);
}
std::vector<float> coptsSorted ((NUM-1)*(NUM));
coptsSorted = copts;
std::sort (coptsSorted.begin(), coptsSorted.end());
float rn = coptsSorted[rnIdx];
double t_calc10th = (std::clock() - t_calc10thStart) / (double) CLOCKS_PER_SEC;
std::cout << percentile << "th percentile pre calc took: " << t_calc10th*ms << " ms for " << idx << " solves and cutoff is "
<< rn << " at " << rnIdx << std::endl;
double t_2pbvpTestStart = std::clock();
float x0[DIM], x1[DIM];
double t_discMotionsStart = std::clock();
std::vector<int> nnIdxs(NUM*NUM,-3);
int nnGoSizes[NUM];
int nnComeSizes[NUM];
for (int i = 0; i < NUM; ++i) {
nnGoSizes[i] = 0;
nnComeSizes[i] = 0;
}
std::vector<float> discMotions (numEdges*(numControls*numDisc)*DIM,0); // discretized motions per NN edge; kept as a std::vector since an array this size would overflow the stack
int nnIdx = 0; // index position in NN discretization array
idx = 0; // index position in copts vector above
std::vector<float> coptsEdge (numEdges); // edge index accessed copts
std::vector<float> toptsEdge (numEdges); // edge index accessed topts
std::vector<int> controlEdge (numEdges*numControls); // edge index accessed controls
std::vector<float> controlDEdge (numEdges*numControls); // edge index accessed control durations
for (int i = 0; i < NUM; ++i) {
for (int d = 0; d < DIM; ++d)
x0[d] = samplesAll[d + DIM*i];
for (int j = 0; j < NUM; ++j) {
if (j == i)
continue;
if (copts[idx] < rn) {
coptsEdge[nnIdx] = copts[idx];
toptsEdge[nnIdx] = topts[idx];
for (int d = 0; d < DIM; ++d)
x1[d] = samplesAll[d + DIM*j];
nnIdxs[j*NUM+i] = nnIdx; // look up for discrete motions from i -> j
float tmpC = dubinsAirplaneCost(x0, x1, &(controlEdge[nnIdx*numControls]),
&(controlDEdge[nnIdx*numControls]));
dubinsAirplanePath(x0, x1,
&(controlEdge[nnIdx*numControls]),
&(controlDEdge[nnIdx*numControls]),
&(discMotions[nnIdx*DIM*(numControls*numDisc)]), numDisc);
nnGoSizes[i]++;
nnComeSizes[j]++;
nnIdx++;
}
idx++;
}
}
double t_discMotions = (std::clock() - t_discMotionsStart) / (double) CLOCKS_PER_SEC;
std::cout << "Discretizing motions took: " << t_discMotions*ms << " ms for " << nnIdx << " solves" << std::endl;
// printArray(&(discMotions[20000*DIM]), 200, DIM, std::cout);
// printArray(&(nnGoSizes[0]), 1, 1000, std::cout);
float *d_toptsEdge;
CUDA_ERROR_CHECK(hipMalloc(&d_toptsEdge, sizeof(float)*numEdges));
CUDA_ERROR_CHECK(hipMemcpy(d_toptsEdge, toptsEdge.data(), sizeof(float)*numEdges, hipMemcpyHostToDevice));
float *d_coptsEdge;
CUDA_ERROR_CHECK(hipMalloc(&d_coptsEdge, sizeof(float)*numEdges));
CUDA_ERROR_CHECK(hipMemcpy(d_coptsEdge, coptsEdge.data(), sizeof(float)*numEdges, hipMemcpyHostToDevice));
int maxNNSize = 0;
for (int i = 0; i < NUM; ++i) {
if (maxNNSize < nnGoSizes[i])
maxNNSize = nnGoSizes[i];
if (maxNNSize < nnComeSizes[i])
maxNNSize = nnComeSizes[i];
}
std::cout << "max number of nn is " << maxNNSize << std::endl;
std::vector<float> distancesCome (NUM*maxNNSize, 0);
std::vector<int> nnGoEdges (NUM*maxNNSize, -1); // edge gives indices (i,j) to check nnIdx to then find the discretized path
std::vector<int> nnComeEdges (NUM*maxNNSize, -1); // edge gives indices (j,i) to check nnIdx to then find the discretized path
std::vector<float> adjCosts (NUM*NUM,10000);
std::vector<float> adjTimes (NUM*NUM,10000);
idx = 0;
for (int i = 0; i < NUM; ++i) {
nnGoSizes[i] = 0; // clear nnSizes again
nnComeSizes[i] = 0; // clear nnSizes again
}
for (int i = 0; i < NUM; ++i) {
for (int j = 0; j < NUM; ++j) {
if (j == i)
continue;
if (copts[idx] < rn) {
nnGoEdges[i*maxNNSize + nnGoSizes[i]] = j; // edge from i to j (i -> j)
nnComeEdges[j*maxNNSize + nnComeSizes[j]] = i;
distancesCome[j*maxNNSize + nnComeSizes[j]] = copts[idx];
nnGoSizes[i]++;
nnComeSizes[j]++;
adjCosts[i*NUM + j] = copts[idx]; // cost to go from i to j
adjTimes[i*NUM + j] = topts[idx]; // time to go from i to j
}
idx++;
}
}
// put NN onto device
float *d_discMotions;
CUDA_ERROR_CHECK(hipMalloc(&d_discMotions, sizeof(float)*numEdges*(numControls*numDisc)*DIM));
CUDA_ERROR_CHECK(hipMemcpy(d_discMotions, &discMotions[0], sizeof(float)*numEdges*(numControls*numDisc)*DIM, hipMemcpyHostToDevice));
// std::cout << "**** disc motions = " << std::endl;
// printArray(&discMotions[0], 30, DIM, std::cout);
int *d_nnIdxs;
CUDA_ERROR_CHECK(hipMalloc(&d_nnIdxs, sizeof(int)*NUM*NUM));
CUDA_ERROR_CHECK(hipMemcpy(d_nnIdxs, &(nnIdxs[0]), sizeof(int)*NUM*NUM, hipMemcpyHostToDevice));
float *d_distancesCome;
CUDA_ERROR_CHECK(hipMalloc(&d_distancesCome, sizeof(float)*NUM*maxNNSize));
CUDA_ERROR_CHECK(hipMemcpy(d_distancesCome, &(distancesCome[0]), sizeof(float)*NUM*maxNNSize, hipMemcpyHostToDevice));
int *d_nnGoEdges;
CUDA_ERROR_CHECK(hipMalloc(&d_nnGoEdges, sizeof(int)*NUM*maxNNSize));
CUDA_ERROR_CHECK(hipMemcpy(d_nnGoEdges, &(nnGoEdges[0]), sizeof(int)*NUM*maxNNSize, hipMemcpyHostToDevice));
int *d_nnComeEdges;
CUDA_ERROR_CHECK(hipMalloc(&d_nnComeEdges, sizeof(int)*NUM*maxNNSize));
CUDA_ERROR_CHECK(hipMemcpy(d_nnComeEdges, &(nnComeEdges[0]), sizeof(int)*NUM*maxNNSize, hipMemcpyHostToDevice));
float *d_costs;
hipMalloc(&d_costs, sizeof(float)*NUM);
thrust::device_vector<int> d_edges(NUM);
int* d_edges_ptr = thrust::raw_pointer_cast(d_edges.data());
// ***************** read in online problem parameters from filename input
// obstacles
std::cout << "Obstacle set, count = " << numObstacles << ":" << std::endl;
// printArray(obstacles.data(), numObstacles, 2*DIM, std::cout);
// load obstacles on device
float *d_obstacles;
CUDA_ERROR_CHECK(hipMalloc(&d_obstacles, sizeof(float)*2*numObstacles*DIM));
CUDA_ERROR_CHECK(hipMemcpy(d_obstacles, obstacles.data(), sizeof(float)*2*numObstacles*DIM, hipMemcpyHostToDevice));
// sample free
bool isFreeSamples[NUM];
thrust::device_vector<bool> d_isFreeSamples_thrust(NUM);
bool* d_isFreeSamples = thrust::raw_pointer_cast(d_isFreeSamples_thrust.data());
double t_sampleFreeStart = std::clock();
const int blockSizeSF = 192;
const int gridSizeSF = std::min((NUM + blockSizeSF - 1) / blockSizeSF, 2147483647);
if (gridSizeSF == 2147483647)
std::cout << "...... ERROR: increase grid size for sampleFree" << std::endl;
hipLaunchKernelGGL(( sampleFree), dim3(gridSizeSF), dim3(blockSizeSF), 0, 0,
d_obstacles, numObstacles, d_samples, d_isFreeSamples, d_debugOutput);
hipDeviceSynchronize();
code = hipPeekAtLastError();
if (hipSuccess != code) { std::cout << "ERROR on freeEdges: " << hipGetErrorString(code) << std::endl; }
double t_sampleFree = (std::clock() - t_sampleFreeStart) / (double) CLOCKS_PER_SEC;
std::cout << "Sample free took: " << t_sampleFree << " s" << std::endl;
CUDA_ERROR_CHECK(hipMemcpy(isFreeSamples, d_isFreeSamples, sizeof(bool)*NUM, hipMemcpyDeviceToHost));
// run GMT
double t_gmtStart = std::clock();
std::cout << "Running wavefront expansion GMT" << std::endl;
GMTwavefront(&(init[0]), &(goal[0]), d_obstacles, numObstacles,
d_distancesCome, d_nnGoEdges, d_nnComeEdges, maxNNSize, d_discMotions, d_nnIdxs,
d_samples, NUM, d_isFreeSamples, rn, numDisc*numControls-1,
d_costs, d_edges_ptr, initIdx, goalIdx) ;
double t_gmt = (std::clock() - t_gmtStart) / (double) CLOCKS_PER_SEC;
std::cout << "******** GMT took: " << t_gmt << " s" << std::endl;
float costGoal = 0;
hipMemcpy(&costGoal, d_costs+goalIdx, sizeof(float), hipMemcpyDeviceToHost);
std::cout << "Solution cost: " << costGoal << std::endl;
// ***************** output results
std::ofstream matlabData;
matlabData.open ("matlabInflationData.txt");
matlabData << "obstacles.data() = [";
printArray(obstacles.data(), 2*numObstacles, DIM, matlabData);
matlabData << "];" << std::endl;
true && printSolution(NUM, d_samples, d_edges_ptr, d_costs);
matlabData.close();
// ***************** PRM
std::vector<float> costs(NUM,10000);
double t_PRMStart = std::clock();
std::cout << "Running PRM" << std::endl;
PRM(&(init[0]), &(goal[0]), d_obstacles, numObstacles,
adjCosts, nnGoEdges, nnComeEdges, maxNNSize, d_discMotions, nnIdxs,
d_samples, NUM, d_isFreeSamples, rn, numDisc*numControls-1, numEdges,
costs, d_edges_ptr, initIdx, goalIdx,
c2g);
double t_PRM = (std::clock() - t_PRMStart) / (double) CLOCKS_PER_SEC;
std::cout << "******** PRM took: " << t_PRM << " s" << std::endl;
// ***************** FMT
double t_FMTStart = std::clock();
std::cout << "Running FMT" << std::endl;
// call FMT
FMTdub(&(init[0]), &(goal[0]), obstacles.data(), numObstacles,
adjCosts, nnGoEdges, nnComeEdges, maxNNSize, discMotions, nnIdxs,
NUM, d_isFreeSamples, rn, numDisc*numControls-1, numEdges,
costs, initIdx, goalIdx,
samplesAll, adjTimes,
numControls, controlEdge, controlDEdge);
double t_FMT = (std::clock() - t_FMTStart) / (double) CLOCKS_PER_SEC;
std::cout << "******** FMT took: " << t_FMT << " s" << std::endl;
// ***************** free memory
hipFree(d_obstacles);
hipFree(d_toptsEdge);
hipFree(d_coptsEdge);
hipFree(d_discMotions);
hipFree(d_nnIdxs);
hipFree(d_distancesCome);
hipFree(d_nnGoEdges);
hipFree(d_nnComeEdges);
hipFree(d_costs);
} | d5d64b8f1c4dfe795fdbb6ac084cb94f391f774c.cu | /*
mainGround.cu
Author: Brian Ichter
This runs the GMT* and MCMP algorithms for a double integrator system (representing a quadrotor model). This main file
is used primarily for timing results and evaluations of solution quality, rather than to be run directly with the quad
(we use mainQuad.cu for this).
Run instructions:
TODO
*/
#include <iostream>
#include <fstream>
#include <ctime>
#include <cstdlib>
#include <algorithm>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/remove.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <thrust/extrema.h>
#include <thrust/reduce.h>
#include <limits>
#include <sstream>
#include "motionPlanningProblem.cuh"
#include "motionPlanningSolution.cuh"
#include "obstacles.cuh"
#include "helper.cuh"
#include "sampler.cuh"
#include "2pBVP.cuh"
#include "GMT.cuh"
#include "PRM.cuh"
#include "FMT.cuh"
#include "dubinsAirplane.cuh"
// compiler inputs
#ifndef DIM
#error Please define DIM.
#endif
#ifndef NUM
#error Please define NUM.
#endif
// horrible coding, but will get the job done for now
int DIMdubMain = 4;
// ***************** offline settings (paste setup here)
float lo[DIM] = {0, 0, 0, 0};
float hi[DIM] = {5, 5, 5, 2*M_PI};
int edgeDiscNum = 8;
float dt = 0.05; // time step for dynamic propagation
int numDisc = 4; // number of discretizations of kinodynamic paths
int numControls = 3; // number of total controls (i.e., Dubins word length)
float ms = 1000;
bool verbose = true;
int main(int argc, const char* argv[]) {
float x0dub[4] = {0, 0, 0, M_PI/2};
float x1dub[4] = {1, -3, 1, 0};
std::vector<int> control(numControls);
std::vector<float> controlD(numControls);
std::vector<float> path(DIMdubMain*numDisc*numControls);
float cmin = dubinsAirplaneCost(x0dub, x1dub, control.data(), controlD.data());
std::cout << "cost = " << cmin << ", word is " << control[0] << ", " << control[1] << ", " << control[2] << std::endl;
std::cout << "durations (" << controlD[0] << ", " << controlD[1] << ", " << controlD[2] << ")" << std::endl;
dubinsAirplanePath(x0dub, x1dub, control.data(), controlD.data(), path.data(), numDisc);
printArray(path.data(), numControls*numDisc, DIMdubMain, std::cout);
std::cout << "*********** Beginning Dubins Aircraft Run (DIM = " << DIM << ", NUM = " << NUM << ") **********" << std::endl;
// check setup is 3D DI
if (DIM != 4) {
std::cout << "DIM must be 4 for Dubins airplane (x y z theta)" << std::endl;
return -1;
}
// check that a problem file has been specified
if (argc != 2) {
std::cout << "Must specify an problem setup filename, i.e. $ ./dubins file.txt" << std::endl;
return -1;
}
int count = 0;
cudaGetDeviceCount(&count);
cudaError_t code;
int deviceNum = 1;
cudaSetDevice(deviceNum);
std::cout << "Number of CUDA devices = " << count << ", selecting " << deviceNum << std::endl;
code = cudaPeekAtLastError();
if (cudaSuccess != code) { std::cout << "ERROR on selecting device: " << cudaGetErrorString(code) << std::endl; }
MotionPlanningProblem mpp;
mpp.filename = argv[1];
mpp.dimC = DIM;
mpp.dimW = 3;
mpp.numSamples = NUM;
mpp.edgeDiscNum = edgeDiscNum;
mpp.dt = dt;
mpp.hi.resize(mpp.dimC);
mpp.lo.resize(mpp.dimC);
for (int i = 0; i < DIM; ++i) {
mpp.hi[i] = hi[i];
mpp.lo[i] = lo[i];
}
// init and goal states (must then connect to the final tree)
std::vector<float> init(DIM, 0.1);
std::vector<float> goal(DIM, 4.9);
init[3] = M_PI/2;
goal[3] = M_PI/2;
int goalIdx = NUM-1;
int initIdx = 0;
int numObstacles = getObstaclesCount();
std::vector<float> obstacles(numObstacles*2*DIM);
generateObstacles(obstacles.data(), numObstacles*2*DIM);
std::ifstream file (mpp.filename);
std::string readValue;
if (file.good()) {
// read in init
for (int d = 0; d < DIM; ++d) {
getline(file, readValue, ',');
std::stringstream convertorInit(readValue);
convertorInit >> init[d];
}
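// read in goal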
for (int d = 0; d < DIM; ++d) {
getline(file, readValue, ',');
std::stringstream convertorGoal(readValue);
convertorGoal >> goal[d];
}
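// read in lower state-space bounds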
for (int d = 0; d < DIM; ++d) {
getline(file, readValue, ',');
std::stringstream convertorLo(readValue);
convertorLo >> lo[d];
}
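// read in upper state-space bounds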
for (int d = 0; d < DIM; ++d) {
getline(file, readValue, ',');
std::stringstream convertorHi(readValue);
convertorHi >> hi[d];
}
getline(file, readValue, ',');
std::stringstream convertorNumObs(readValue);
convertorNumObs >> numObstacles;
obstacles.resize(numObstacles*DIM*2);
for (int obs = 0; obs < numObstacles; ++obs) {
for (int d = 0; d < DIM*2; ++d) {
getline(file, readValue, ',');
std::stringstream convertorObs(readValue);
convertorObs >> obstacles[obs*DIM*2 + d];
}
}
if (verbose) {
std::cout << "***** Inputs from " << mpp.filename << " are: *****" << std::endl;
}
if (verbose) {
std::cout << "init is: "; printArray(&init[0],1,DIM,std::cout);
}
if (verbose) {
std::cout << "goal is: "; printArray(&goal[0],1,DIM,std::cout);
}
if (verbose) {
std::cout << "lo is: "; printArray(&lo[0],1,DIM,std::cout);
}
if (verbose) {
std::cout << "hi is: "; printArray(&hi[0],1,DIM,std::cout);
}
if (verbose) {
std::cout << "obstacle count = " << numObstacles << std::endl;
}
if(verbose) {
printArray(&obstacles[0],numObstacles,2*DIM,std::cout);
}
} else {
std::cout << "didn't read in file, bad file!" << std::endl;
}
std::cout << "--- Motion planning problem, " << mpp.filename << " ---" << std::endl;
std::cout << "Sample count = " << mpp.numSamples << ", C-space dim = " << mpp.dimC << ", Workspace dim = " << mpp.dimW << std::endl;
std::cout << "hi = ["; for (int i = 0; i < mpp.dimC; ++i) { std::cout << hi[i] << " "; } std::cout << "], ";
std::cout << "lo = ["; for (int i = 0; i < mpp.dimC; ++i) { std::cout << lo[i] << " "; } std::cout << "]" << std::endl;
std::cout << "edge discretizations " << mpp.edgeDiscNum << ", dt = " << mpp.dt << std::endl;
/*********************** create array to return debugging information ***********************/
float *d_debugOutput;
cudaMalloc(&d_debugOutput, sizeof(float)*NUM);
// ***************** setup data structures and struct
// ***************** precomputation
std::vector<float> samplesAll (DIM*NUM);
createSamplesHalton(0, samplesAll.data(), &(init[0]), &(goal[0]), lo, hi);
thrust::device_vector<float> d_samples_thrust(DIM*NUM);
float *d_samples = thrust::raw_pointer_cast(d_samples_thrust.data());
CUDA_ERROR_CHECK(cudaMemcpy(d_samples, samplesAll.data(), sizeof(float)*DIM*NUM, cudaMemcpyHostToDevice));
// calculate nn
int nullControl[3]; // throw away values for computing the cost of each connection
float nullControlD[3];
double t_calc10thStart = std::clock();
std::vector<float> topts ((NUM-1)*(NUM));
std::vector<float> copts ((NUM-1)*(NUM));
float percentile = 0.05;
int rnIdx = (NUM-1)*(NUM)*percentile; // index of the percentile cutoff in the sorted costs; also the number of NN edges kept
int numEdges = rnIdx;
std::cout << "rnIdx is " << rnIdx << std::endl;
std::cout << "This is a bit slow as it is currently implemented on CPU, can be sped up significantly with a simple GPU implementation" << std::endl;
int idx = 0;
std::vector<float> c2g (NUM);
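// c2g[i]: obstacle-free Dubins cost from sample i to the goal, used as a heuristic by PRM below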
for (int i = 0; i < NUM; ++i) {
for (int j = 0; j < NUM; ++j) {
if (j == i)
continue;
topts[idx] = dubinsAirplaneCost(&(samplesAll[i*DIM]), &(samplesAll[j*DIM]), nullControl, nullControlD);
copts[idx] = dubinsAirplaneCost(&(samplesAll[i*DIM]), &(samplesAll[j*DIM]), nullControl, nullControlD);
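// note: topts and copts are filled from the identical call; time and cost
// presumably coincide for this Dubins airplane model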
idx++;
}
c2g[i] = dubinsAirplaneCost(&(samplesAll[i*DIM]), &(samplesAll[goalIdx*DIM]), nullControl, nullControlD);
}
std::vector<float> coptsSorted ((NUM-1)*(NUM));
coptsSorted = copts;
std::sort (coptsSorted.begin(), coptsSorted.end());
float rn = coptsSorted[rnIdx];
double t_calc10th = (std::clock() - t_calc10thStart) / (double) CLOCKS_PER_SEC;
std::cout << percentile << "th percentile pre calc took: " << t_calc10th*ms << " ms for " << idx << " solves and cutoff is "
<< rn << " at " << rnIdx << std::endl;
double t_2pbvpTestStart = std::clock();
float x0[DIM], x1[DIM];
double t_discMotionsStart = std::clock();
std::vector<int> nnIdxs(NUM*NUM,-3);
int nnGoSizes[NUM];
int nnComeSizes[NUM];
for (int i = 0; i < NUM; ++i) {
nnGoSizes[i] = 0;
nnComeSizes[i] = 0;
}
std::vector<float> discMotions (numEdges*(numControls*numDisc)*DIM,0); // discretized motions per NN edge; kept as a std::vector since an array this size would overflow the stack
int nnIdx = 0; // index position in NN discretization array
idx = 0; // index position in copts vector above
std::vector<float> coptsEdge (numEdges); // edge index accessed copts
std::vector<float> toptsEdge (numEdges); // edge index accessed topts
std::vector<int> controlEdge (numEdges*numControls); // edge index accessed controls
std::vector<float> controlDEdge (numEdges*numControls); // edge index accessed control durations
for (int i = 0; i < NUM; ++i) {
for (int d = 0; d < DIM; ++d)
x0[d] = samplesAll[d + DIM*i];
for (int j = 0; j < NUM; ++j) {
if (j == i)
continue;
if (copts[idx] < rn) {
coptsEdge[nnIdx] = copts[idx];
toptsEdge[nnIdx] = topts[idx];
for (int d = 0; d < DIM; ++d)
x1[d] = samplesAll[d + DIM*j];
nnIdxs[j*NUM+i] = nnIdx; // look up for discrete motions from i -> j
float tmpC = dubinsAirplaneCost(x0, x1, &(controlEdge[nnIdx*numControls]),
&(controlDEdge[nnIdx*numControls]));
dubinsAirplanePath(x0, x1,
&(controlEdge[nnIdx*numControls]),
&(controlDEdge[nnIdx*numControls]),
&(discMotions[nnIdx*DIM*(numControls*numDisc)]), numDisc);
nnGoSizes[i]++;
nnComeSizes[j]++;
nnIdx++;
}
idx++;
}
}
double t_discMotions = (std::clock() - t_discMotionsStart) / (double) CLOCKS_PER_SEC;
std::cout << "Discretizing motions took: " << t_discMotions*ms << " ms for " << nnIdx << " solves" << std::endl;
// printArray(&(discMotions[20000*DIM]), 200, DIM, std::cout);
// printArray(&(nnGoSizes[0]), 1, 1000, std::cout);
float *d_toptsEdge;
CUDA_ERROR_CHECK(cudaMalloc(&d_toptsEdge, sizeof(float)*numEdges));
CUDA_ERROR_CHECK(cudaMemcpy(d_toptsEdge, toptsEdge.data(), sizeof(float)*numEdges, cudaMemcpyHostToDevice));
float *d_coptsEdge;
CUDA_ERROR_CHECK(cudaMalloc(&d_coptsEdge, sizeof(float)*numEdges));
CUDA_ERROR_CHECK(cudaMemcpy(d_coptsEdge, coptsEdge.data(), sizeof(float)*numEdges, cudaMemcpyHostToDevice));
int maxNNSize = 0;
for (int i = 0; i < NUM; ++i) {
if (maxNNSize < nnGoSizes[i])
maxNNSize = nnGoSizes[i];
if (maxNNSize < nnComeSizes[i])
maxNNSize = nnComeSizes[i];
}
std::cout << "max number of nn is " << maxNNSize << std::endl;
std::vector<float> distancesCome (NUM*maxNNSize, 0);
std::vector<int> nnGoEdges (NUM*maxNNSize, -1); // edge gives indices (i,j) to check nnIdx to then find the discretized path
std::vector<int> nnComeEdges (NUM*maxNNSize, -1); // edge gives indices (j,i) to check nnIdx to then find the discretized path
std::vector<float> adjCosts (NUM*NUM,10000);
std::vector<float> adjTimes (NUM*NUM,10000);
idx = 0;
for (int i = 0; i < NUM; ++i) {
nnGoSizes[i] = 0; // clear nnSizes again
nnComeSizes[i] = 0; // clear nnSizes again
}
for (int i = 0; i < NUM; ++i) {
for (int j = 0; j < NUM; ++j) {
if (j == i)
continue;
if (copts[idx] < rn) {
nnGoEdges[i*maxNNSize + nnGoSizes[i]] = j; // edge from i to j (i -> j)
nnComeEdges[j*maxNNSize + nnComeSizes[j]] = i;
distancesCome[j*maxNNSize + nnComeSizes[j]] = copts[idx];
nnGoSizes[i]++;
nnComeSizes[j]++;
adjCosts[i*NUM + j] = copts[idx]; // cost to go from i to j
adjTimes[i*NUM + j] = topts[idx]; // time to go from i to j
}
idx++;
}
}
// put NN onto device
float *d_discMotions;
CUDA_ERROR_CHECK(cudaMalloc(&d_discMotions, sizeof(float)*numEdges*(numControls*numDisc)*DIM));
CUDA_ERROR_CHECK(cudaMemcpy(d_discMotions, &discMotions[0], sizeof(float)*numEdges*(numControls*numDisc)*DIM, cudaMemcpyHostToDevice));
// std::cout << "**** disc motions = " << std::endl;
// printArray(&discMotions[0], 30, DIM, std::cout);
int *d_nnIdxs;
CUDA_ERROR_CHECK(cudaMalloc(&d_nnIdxs, sizeof(int)*NUM*NUM));
CUDA_ERROR_CHECK(cudaMemcpy(d_nnIdxs, &(nnIdxs[0]), sizeof(int)*NUM*NUM, cudaMemcpyHostToDevice));
float *d_distancesCome;
CUDA_ERROR_CHECK(cudaMalloc(&d_distancesCome, sizeof(float)*NUM*maxNNSize));
CUDA_ERROR_CHECK(cudaMemcpy(d_distancesCome, &(distancesCome[0]), sizeof(float)*NUM*maxNNSize, cudaMemcpyHostToDevice));
int *d_nnGoEdges;
CUDA_ERROR_CHECK(cudaMalloc(&d_nnGoEdges, sizeof(int)*NUM*maxNNSize));
CUDA_ERROR_CHECK(cudaMemcpy(d_nnGoEdges, &(nnGoEdges[0]), sizeof(int)*NUM*maxNNSize, cudaMemcpyHostToDevice));
int *d_nnComeEdges;
CUDA_ERROR_CHECK(cudaMalloc(&d_nnComeEdges, sizeof(int)*NUM*maxNNSize));
CUDA_ERROR_CHECK(cudaMemcpy(d_nnComeEdges, &(nnComeEdges[0]), sizeof(int)*NUM*maxNNSize, cudaMemcpyHostToDevice));
float *d_costs;
cudaMalloc(&d_costs, sizeof(float)*NUM);
thrust::device_vector<int> d_edges(NUM);
int* d_edges_ptr = thrust::raw_pointer_cast(d_edges.data());
// ***************** read in online problem parameters from filename input
// obstacles
std::cout << "Obstacle set, count = " << numObstacles << ":" << std::endl;
// printArray(obstacles.data(), numObstacles, 2*DIM, std::cout);
// load obstacles on device
float *d_obstacles;
CUDA_ERROR_CHECK(cudaMalloc(&d_obstacles, sizeof(float)*2*numObstacles*DIM));
CUDA_ERROR_CHECK(cudaMemcpy(d_obstacles, obstacles.data(), sizeof(float)*2*numObstacles*DIM, cudaMemcpyHostToDevice));
// sample free
bool isFreeSamples[NUM];
thrust::device_vector<bool> d_isFreeSamples_thrust(NUM);
bool* d_isFreeSamples = thrust::raw_pointer_cast(d_isFreeSamples_thrust.data());
double t_sampleFreeStart = std::clock();
const int blockSizeSF = 192;
const int gridSizeSF = std::min((NUM + blockSizeSF - 1) / blockSizeSF, 2147483647);
if (gridSizeSF == 2147483647)
std::cout << "...... ERROR: increase grid size for sampleFree" << std::endl;
sampleFree<<<gridSizeSF, blockSizeSF>>>(
d_obstacles, numObstacles, d_samples, d_isFreeSamples, d_debugOutput);
cudaDeviceSynchronize();
code = cudaPeekAtLastError();
if (cudaSuccess != code) { std::cout << "ERROR on freeEdges: " << cudaGetErrorString(code) << std::endl; }
double t_sampleFree = (std::clock() - t_sampleFreeStart) / (double) CLOCKS_PER_SEC;
std::cout << "Sample free took: " << t_sampleFree << " s" << std::endl;
CUDA_ERROR_CHECK(cudaMemcpy(isFreeSamples, d_isFreeSamples, sizeof(bool)*NUM, cudaMemcpyDeviceToHost));
// run GMT
double t_gmtStart = std::clock();
std::cout << "Running wavefront expansion GMT" << std::endl;
GMTwavefront(&(init[0]), &(goal[0]), d_obstacles, numObstacles,
d_distancesCome, d_nnGoEdges, d_nnComeEdges, maxNNSize, d_discMotions, d_nnIdxs,
d_samples, NUM, d_isFreeSamples, rn, numDisc*numControls-1,
d_costs, d_edges_ptr, initIdx, goalIdx) ;
double t_gmt = (std::clock() - t_gmtStart) / (double) CLOCKS_PER_SEC;
std::cout << "******** GMT took: " << t_gmt << " s" << std::endl;
float costGoal = 0;
cudaMemcpy(&costGoal, d_costs+goalIdx, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "Solution cost: " << costGoal << std::endl;
// ***************** output results
std::ofstream matlabData;
matlabData.open ("matlabInflationData.txt");
matlabData << "obstacles.data() = [";
printArray(obstacles.data(), 2*numObstacles, DIM, matlabData);
matlabData << "];" << std::endl;
true && printSolution(NUM, d_samples, d_edges_ptr, d_costs);
matlabData.close();
// ***************** PRM
std::vector<float> costs(NUM,10000);
double t_PRMStart = std::clock();
std::cout << "Running PRM" << std::endl;
PRM(&(init[0]), &(goal[0]), d_obstacles, numObstacles,
adjCosts, nnGoEdges, nnComeEdges, maxNNSize, d_discMotions, nnIdxs,
d_samples, NUM, d_isFreeSamples, rn, numDisc*numControls-1, numEdges,
costs, d_edges_ptr, initIdx, goalIdx,
c2g);
double t_PRM = (std::clock() - t_PRMStart) / (double) CLOCKS_PER_SEC;
std::cout << "******** PRM took: " << t_PRM << " s" << std::endl;
// ***************** FMT
double t_FMTStart = std::clock();
std::cout << "Running FMT" << std::endl;
// call FMT
FMTdub(&(init[0]), &(goal[0]), obstacles.data(), numObstacles,
adjCosts, nnGoEdges, nnComeEdges, maxNNSize, discMotions, nnIdxs,
NUM, d_isFreeSamples, rn, numDisc*numControls-1, numEdges,
costs, initIdx, goalIdx,
samplesAll, adjTimes,
numControls, controlEdge, controlDEdge);
double t_FMT = (std::clock() - t_FMTStart) / (double) CLOCKS_PER_SEC;
std::cout << "******** FMT took: " << t_FMT << " s" << std::endl;
// ***************** free memory
cudaFree(d_obstacles);
cudaFree(d_toptsEdge);
cudaFree(d_coptsEdge);
cudaFree(d_discMotions);
cudaFree(d_nnIdxs);
cudaFree(d_distancesCome);
cudaFree(d_nnGoEdges);
cudaFree(d_nnComeEdges);
cudaFree(d_costs);
} |
ce377d08e87487f6748dedfe296347664cc0ea30.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<string.h>
#include<cuda.h>
__global__ void funcflops(float *a,float *b,float *c)
{
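// five arithmetic statements per element; the factor 5 in the GFLOPS
// formula in main() counts these (the commented lines are alternative ops)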
int t=blockIdx.x*blockDim.x+threadIdx.x;
c[t]=a[t]+b[t]+1;
c[t+2]=a[t+2]*b[t+2];
c[t]=a[t]+b[t];
c[t]=a[t]+b[t];
c[t]=a[t]+b[t];
//c[t]=a[t]*b[t];
//c[t]=a[t]-b[t];
//c[t]=a[t]/b[t];
}
int main( void )
{
int cyclecount=10000;
int devicecount,device;
double time_s;
int blocks,threads,n;
long start_time,end_time;
struct hipDeviceProp_t properties;
float *a, *b, *c;
struct timeval start,stop;
float *dev_a, *dev_b, *dev_c;
hipError_t cudaResultCode = hipGetDeviceCount(&devicecount);
if (cudaResultCode != hipSuccess)
devicecount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < devicecount; ++device) {
hipGetDeviceProperties(&properties, device);
if (properties.major != 9999) /* 9999 means emulation only */
if (device==0)
{
printf("multiProcessorCount %d\n",properties.multiProcessorCount);
printf("maxThreadsPerMultiProcessor %d\n",properties.maxThreadsPerMultiProcessor);
blocks=properties.multiProcessorCount;
threads=properties.maxThreadsPerMultiProcessor;
// n=properties.multiProcessorCount * properties.maxThreadsPerMultiProcessor;
n=blocks*threads;
}
}
printf("%s\n",properties.name);
a=(float*)malloc(n * sizeof(float));
b=(float*)malloc(n * sizeof(float));
c=(float*)malloc(n * sizeof(float));
// allocate the memory on the GPU
hipMalloc( (void**)&dev_a, n * sizeof(float) );
hipMalloc( (void**)&dev_b, n * sizeof(float) );
hipMalloc( (void**)&dev_c, (n + 2) * sizeof(float) ); // +2: the kernel also writes c[t+2]
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
hipMemcpy( dev_a, a, n * sizeof(float),hipMemcpyHostToDevice );
hipMemcpy( dev_b, b, n * sizeof(float),hipMemcpyHostToDevice );
gettimeofday(&start,NULL);
int i;
start_time=start.tv_sec*1000000 + start.tv_usec;//get start time
for(i=0;i<cyclecount;i++)
hipLaunchKernelGGL(( funcflops), dim3(blocks),dim3(threads), 0, 0, dev_a, dev_b, dev_c );
hipDeviceSynchronize(); // kernel launches are asynchronous: wait for completion before stopping the clock
gettimeofday(&stop,NULL);
end_time=stop.tv_sec*1000000 + stop.tv_usec;//get end time
// copy the array 'c' back from the GPU to the CPU
hipMemcpy( c, dev_c, n * sizeof(float),hipMemcpyDeviceToHost );
// display the results
// for (int i=0; i<N; i++) {
// printf( "%d + %d = %d\n", a[i], b[i], c[i] );
// }
// free the memory allocated on the GPU
time_s=end_time-start_time;
printf("Time taken: %lf\n",time_s);
//printf("GFLOPS: %lf\n",);
double d=(double)cyclecount*n*5/((double)time_s*1000.0); // promote to double first: cyclecount*n*5 overflows 32-bit int
//d=d/1000000.0;
printf("GFlops %lf \n",d);
hipFree( dev_a );
hipFree( dev_b );
hipFree( dev_c );
free(a);
free(b);
free(c);
return(0);
}
| ce377d08e87487f6748dedfe296347664cc0ea30.cu | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<string.h>
#include<cuda.h>
__global__ void funcflops(float *a,float *b,float *c)
{
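// five arithmetic statements per element; the factor 5 in the GFLOPS
// formula in main() counts these (the commented lines are alternative ops)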
int t=blockIdx.x*blockDim.x+threadIdx.x;
c[t]=a[t]+b[t]+1;
c[t+2]=a[t+2]*b[t+2];
c[t]=a[t]+b[t];
c[t]=a[t]+b[t];
c[t]=a[t]+b[t];
//c[t]=a[t]*b[t];
//c[t]=a[t]-b[t];
//c[t]=a[t]/b[t];
}
int main( void )
{
int cyclecount=10000;
int devicecount,device;
double time_s;
int blocks,threads,n;
long start_time,end_time;
struct cudaDeviceProp properties;
float *a, *b, *c;
struct timeval start,stop;
float *dev_a, *dev_b, *dev_c;
cudaError_t cudaResultCode = cudaGetDeviceCount(&devicecount);
if (cudaResultCode != cudaSuccess)
devicecount = 0;
/* machines with no GPUs can still report one emulation device */
for (device = 0; device < devicecount; ++device) {
cudaGetDeviceProperties(&properties, device);
if (properties.major != 9999) /* 9999 means emulation only */
if (device==0)
{
printf("multiProcessorCount %d\n",properties.multiProcessorCount);
printf("maxThreadsPerMultiProcessor %d\n",properties.maxThreadsPerMultiProcessor);
blocks=properties.multiProcessorCount;
threads=properties.maxThreadsPerMultiProcessor;
// n=properties.multiProcessorCount * properties.maxThreadsPerMultiProcessor;
n=blocks*threads;
}
}
printf("%s\n",properties.name);
a=(float*)malloc(n * sizeof(float));
b=(float*)malloc(n * sizeof(float));
c=(float*)malloc(n * sizeof(float));
// allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, n * sizeof(float) );
cudaMalloc( (void**)&dev_b, n * sizeof(float) );
cudaMalloc( (void**)&dev_c, (n + 2) * sizeof(float) ); // +2: the kernel also writes c[t+2]
// fill the arrays 'a' and 'b' on the CPU
for (int i=0; i<n; i++) {
a[i] = -i;
b[i] = i * i;
}
cudaMemcpy( dev_a, a, n * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy( dev_b, b, n * sizeof(float),cudaMemcpyHostToDevice );
gettimeofday(&start,NULL);
int i;
start_time=start.tv_sec*1000000 + start.tv_usec;//get start time
for(i=0;i<cyclecount;i++)
funcflops<<<blocks,threads>>>( dev_a, dev_b, dev_c );
cudaDeviceSynchronize(); // kernel launches are asynchronous: wait for completion before stopping the clock
gettimeofday(&stop,NULL);
end_time=stop.tv_sec*1000000 + stop.tv_usec;//get end time
// copy the array 'c' back from the GPU to the CPU
cudaMemcpy( c, dev_c, n * sizeof(float),cudaMemcpyDeviceToHost );
// display the results
// for (int i=0; i<N; i++) {
// printf( "%d + %d = %d\n", a[i], b[i], c[i] );
// }
// free the memory allocated on the GPU
time_s=end_time-start_time;
printf("Time taken: %lf\n",time_s);
//printf("GFLOPS: %lf\n",);
double d=(double)cyclecount*n*5/((double)time_s*1000.0); // promote to double first: cyclecount*n*5 overflows 32-bit int
//d=d/1000000.0;
printf("GFlops %lf \n",d);
cudaFree( dev_a );
cudaFree( dev_b );
cudaFree( dev_c );
free(a);
free(b);
free(c);
return(0);
}
|
1bdd193d57f383de5952899b132f2f84e9c7c621.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "l2_loss.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
template <typename T>
__global__ void L2LossKernel(const size_t input_size, const T *input , T *output) {
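// grid-stride loop: each thread computes x*x/2 for each of its elements and
// atomically adds it into the single-element output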
T ret = 0;
for (size_t id = blockIdx.x * blockDim.x + threadIdx.x; id < input_size; id += blockDim.x * gridDim.x) {
ret = input[id] * input[id];
ret /= static_cast<T>(2);
MsAtomicAdd(output, ret);
}
}
template <typename T>
__global__ void ClearOutputMem(T *output) {
output[0] = static_cast<T>(0);
}
template <typename T>
void L2Loss(const size_t input_size, const T *input , T *output, hipStream_t stream) {
hipLaunchKernelGGL(( ClearOutputMem), dim3(GET_BLOCKS(1)), dim3(GET_THREADS), 0, stream, output);
hipLaunchKernelGGL(( L2LossKernel), dim3(GET_BLOCKS(input_size)), dim3(GET_THREADS), 0, stream, input_size, input, output);
}
template CUDA_LIB_EXPORT void L2Loss<float>(const size_t input_size, const float *input , float *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void L2Loss<half>(const size_t input_size, const half *input , half *output,
hipStream_t stream);
template CUDA_LIB_EXPORT void L2Loss<double>(const size_t input_size, const double *input , double *output,
hipStream_t stream);
| 1bdd193d57f383de5952899b132f2f84e9c7c621.cu | /**
* Copyright 2020-2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "l2_loss.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
template <typename T>
__global__ void L2LossKernel(const size_t input_size, const T *input , T *output) {
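// grid-stride loop: each thread computes x*x/2 for each of its elements and
// atomically adds it into the single-element output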
T ret = 0;
for (size_t id = blockIdx.x * blockDim.x + threadIdx.x; id < input_size; id += blockDim.x * gridDim.x) {
ret = input[id] * input[id];
ret /= static_cast<T>(2);
MsAtomicAdd(output, ret);
}
}
template <typename T>
__global__ void ClearOutputMem(T *output) {
output[0] = static_cast<T>(0);
}
template <typename T>
void L2Loss(const size_t input_size, const T *input , T *output, cudaStream_t stream) {
ClearOutputMem<<<GET_BLOCKS(1), GET_THREADS, 0, stream>>>(output);
L2LossKernel<<<GET_BLOCKS(input_size), GET_THREADS, 0, stream>>>(input_size, input, output);
}
template CUDA_LIB_EXPORT void L2Loss<float>(const size_t input_size, const float *input , float *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void L2Loss<half>(const size_t input_size, const half *input , half *output,
cudaStream_t stream);
template CUDA_LIB_EXPORT void L2Loss<double>(const size_t input_size, const double *input , double *output,
cudaStream_t stream);
|
5a030933cc7e2ac562aea518838595f1ff09e66a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <raft/cudart_utils.h>
#include <thrust/functional.h>
#include <cuml/fil/multi_sum.cuh>
#include <fil/internal.cuh>
#include "common_hip.cuh"
namespace ML {
namespace fil {
// vec wraps T[N] (e.g. float[N]) for hipcub::BlockReduce
template <int N, typename T>
struct vec;
template <typename BinaryOp>
struct Vectorized {
BinaryOp op;
__device__ Vectorized(BinaryOp op_) : op(op_) {}
template <int NITEMS, typename T>
constexpr __host__ __device__ __forceinline__ vec<NITEMS, T> operator()(vec<NITEMS, T> a,
vec<NITEMS, T> b) const
{
vec<NITEMS, T> c;
#pragma unroll
for (int i = 0; i < NITEMS; i++)
c[i] = op(a[i], b[i]);
return c;
}
};
template <typename BinaryOp>
constexpr __host__ __device__ Vectorized<BinaryOp> vectorized(BinaryOp op)
{
return op;
}
template <int N, typename T>
struct vec {
static const int NITEMS = N;
T data[N];
explicit __host__ __device__ vec(T t)
{
#pragma unroll
for (int i = 0; i < N; ++i)
data[i] = t;
}
__host__ __device__ vec() : vec(T()) {}
__host__ __device__ T& operator[](int i) { return data[i]; }
__host__ __device__ T operator[](int i) const { return data[i]; }
friend __host__ __device__ vec<N, T> operator+(const vec<N, T>& a, const vec<N, T>& b)
{
return vectorized(hipcub::Sum())(a, b);
}
friend __host__ __device__ void operator+=(vec<N, T>& a, const vec<N, T>& b) { a = a + b; }
template <typename Vec>
friend __host__ __device__ vec<N, T> operator/(vec<N, T>& a, const Vec& b)
{
return vectorized(thrust::divides<T>())(a, vec<N, T>(b));
}
template <typename Vec>
friend __host__ __device__ void operator/=(vec<N, T>& a, const Vec& b)
{
a = a / b;
}
};
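// Example: the aggregators below declare `vec<NITEMS, float> acc;`, then use
// `acc += tree_prediction;` during accumulation and `acc /= trees_per_class;`
// during normalization, all element-wise via the operators defined above.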
struct best_margin_label : hipcub::KeyValuePair<int, float> {
__host__ __device__ best_margin_label(hipcub::KeyValuePair<int, float> pair)
: hipcub::KeyValuePair<int, float>(pair)
{
}
__host__ __device__ best_margin_label(int c = 0, float f = -INFINITY)
: hipcub::KeyValuePair<int, float>({c, f})
{
}
};
template <int NITEMS>
__device__ __forceinline__ vec<NITEMS, best_margin_label> to_vec(int c, vec<NITEMS, float> margin)
{
vec<NITEMS, best_margin_label> ret;
#pragma unroll
for (int i = 0; i < NITEMS; ++i)
ret[i] = best_margin_label(c, margin[i]);
return ret;
}
struct ArgMax {
template <int NITEMS>
__host__ __device__ __forceinline__ vec<NITEMS, best_margin_label> operator()(
vec<NITEMS, best_margin_label> a, vec<NITEMS, best_margin_label> b) const
{
vec<NITEMS, best_margin_label> c;
#pragma unroll
for (int i = 0; i < NITEMS; i++)
c[i] = hipcub::ArgMax()(a[i], b[i]);
return c;
}
};
/** tree_leaf_output returns the leaf outputs from the tree with leaf indices
given by leaves for n_rows items. FULL_NITEMS indicates whether n_rows ==
NITEMS, to allow the compiler to skip the conditional when unrolling the
loop. */
template <typename output_type, bool FULL_NITEMS, int NITEMS, typename tree_type>
__device__ __forceinline__ vec<NITEMS, output_type> tree_leaf_output(tree_type tree,
int n_rows,
int (&leaves)[NITEMS])
{
vec<NITEMS, output_type> out(0);
#pragma unroll
for (int j = 0; j < NITEMS; ++j) {
if (FULL_NITEMS || j < n_rows) {
/** dependent names are not considered templates by default, unless it's a
member of a current [template] instantiation. As output<>() is a
member function inherited from the base class, template
output<output_type>() is required. */
out[j] = tree[leaves[j]].template output<output_type>();
}
}
return out;
}
template <int NITEMS, typename output_type, typename tree_type>
__device__ __forceinline__ vec<NITEMS, output_type> infer_one_tree(tree_type tree,
const float* input,
int cols,
int n_rows)
{
// find the leaf nodes for each row
int curr[NITEMS];
// the first n_rows are active
int mask = (1 << n_rows) - 1;
for (int j = 0; j < NITEMS; ++j)
curr[j] = 0;
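// each set bit of `mask` marks a row still descending the tree; a row's bit
// is cleared once it reaches a leaf, and the loop exits when all bits clear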
do {
#pragma unroll
for (int j = 0; j < NITEMS; ++j) {
auto n = tree[curr[j]];
mask &= ~(n.is_leaf() << j);
if ((mask & (1 << j)) != 0) {
float val = input[j * cols + n.fid()];
bool cond = isnan(val) ? !n.def_left() : val >= n.thresh();
curr[j] = n.left(curr[j]) + cond;
}
}
} while (mask != 0);
// get the output from the leaves
if (n_rows == NITEMS) {
return tree_leaf_output<output_type, true>(tree, n_rows, curr);
} else {
return tree_leaf_output<output_type, false>(tree, n_rows, curr);
}
}
template <typename output_type, typename tree_type>
__device__ __forceinline__ vec<1, output_type> infer_one_tree(tree_type tree,
const float* input,
int cols,
int rows)
{
int curr = 0;
for (;;) {
auto n = tree[curr];
if (n.is_leaf()) break;
float val = input[n.fid()];
bool cond = isnan(val) ? !n.def_left() : val >= n.thresh();
curr = n.left(curr) + cond;
}
vec<1, output_type> out;
/** dependent names are not considered templates by default,
unless it's a member of a current [template] instantiation.**/
out[0] = tree[curr].template output<output_type>();
return out;
}
/**
The shared memory requirements for finalization stage may differ based
on the set of PTX architectures the kernels were compiled for, as well as
the CUDA compute capability of the device chosen for computation.
TODO (levsnv): run a test kernel during forest init to determine the compute capability
chosen for the inference, for an accurate sizeof(BlockReduce::TempStorage),
which is used in determining max NITEMS or max input data columns.
600 is the __CUDA_ARCH__ for Pascal (6.0) GPUs, which is not defined in
host code.
6.0 is the earliest compute capability supported by FIL and RAPIDS in general.
See https://rapids.ai/start.html as well as cmake defaults.
*/
// values below are defaults as of this change.
template <int NITEMS>
size_t block_reduce_footprint_host()
{
return sizeof(
typename hipcub::
BlockReduce<vec<NITEMS, float>, FIL_TPB, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS, 1, 1, 600>::
TempStorage);
}
template <int NITEMS>
size_t block_reduce_best_class_footprint_host()
{
return sizeof(typename hipcub::BlockReduce<vec<NITEMS, best_margin_label>,
FIL_TPB,
hipcub::BLOCK_REDUCE_WARP_REDUCTIONS,
1,
1,
600>::TempStorage);
}
// the device template should achieve the best performance, using up-to-date
// CUB defaults
template <typename T, typename BinaryOp>
__device__ __forceinline__ T block_reduce(T value, BinaryOp op, void* storage)
{
typedef hipcub::BlockReduce<T, FIL_TPB> BlockReduceT;
return BlockReduceT(*(typename BlockReduceT::TempStorage*)storage).Reduce(value, op, blockDim.x);
}
template <int NITEMS,
leaf_algo_t leaf_algo> // = FLOAT_UNARY_BINARY
struct tree_aggregator_t {
vec<NITEMS, float> acc;
void* tmp_storage;
/** shared memory footprint of the accumulator during
the finalization of forest inference kernel, when infer_k output
value is computed.
num_classes is used for other template parameters */
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
return log2_threads_per_tree != 0 ? FIL_TPB * NITEMS * sizeof(float)
: block_reduce_footprint_host<NITEMS>();
}
/** shared memory footprint of the accumulator during
the accumulation of forest inference, when individual trees
are inferred and partial aggregates are accumulated.
num_classes is used for other template parameters */
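// (FLOAT_UNARY_BINARY accumulates per-thread in registers, hence zero)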
static size_t smem_accumulate_footprint(int num_classes) { return 0; }
/**
num_classes is used for other template parameters */
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: tmp_storage(finalize_workspace)
{
}
__device__ __forceinline__ void accumulate(vec<NITEMS, float> single_tree_prediction,
int tree,
int thread_num_rows)
{
acc += single_tree_prediction;
}
__device__ __forceinline__ void finalize(float* block_out,
int block_num_rows,
int output_stride,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
if (FIL_TPB != 1 << log2_threads_per_tree) { // anything to reduce?
// ensure input columns can be overwritten (no threads traversing trees)
__syncthreads();
if (log2_threads_per_tree == 0) {
acc = block_reduce(acc, vectorized(hipcub::Sum()), tmp_storage);
} else {
auto per_thread = (vec<NITEMS, float>*)tmp_storage;
per_thread[threadIdx.x] = acc;
__syncthreads();
// We have two pertinent cases for splitting FIL_TPB == 256 values:
// 1. 2000 columns, which fit few threads/tree in shared memory,
// so ~256 groups. These are the models that will run the slowest.
// multi_sum performance is not sensitive to the radix here.
// 2. 50 columns, so ~32 threads/tree, so ~8 groups. These are the most
// popular.
acc =
multi_sum<5>(per_thread, 1 << log2_threads_per_tree, FIL_TPB >> log2_threads_per_tree);
}
}
if (threadIdx.x * NITEMS >= block_num_rows) return;
#pragma unroll
for (int row = 0; row < NITEMS; ++row) {
int out_preds_i = threadIdx.x * NITEMS + row;
if (out_preds_i < block_num_rows) block_out[out_preds_i * output_stride] = acc[row];
}
}
};
// tmp_storage may overlap shared memory addressed by [begin, end)
// allreduce_shmem ensures no race conditions
template <typename Iterator, typename BinaryOp>
__device__ __forceinline__ auto allreduce_shmem(Iterator begin,
Iterator end,
BinaryOp op,
void* tmp_storage)
{
typedef typename std::iterator_traits<Iterator>::value_type value_type;
value_type thread_partial;
for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
thread_partial = op(thread_partial, *it);
__syncthreads(); // free shared memory [begin, end)
auto res = block_reduce(thread_partial, op, tmp_storage);
// broadcast sum to all threads
__syncthreads(); // free up tmp_storage
if (threadIdx.x == 0) *(value_type*)tmp_storage = res;
__syncthreads();
return *(value_type*)tmp_storage;
}
// *begin and *end shall be struct vec
// tmp_storage may overlap shared memory addressed by [begin, end)
template <typename Iterator>
__device__ __forceinline__ void write_best_class(
Iterator begin, Iterator end, void* tmp_storage, float* out, int num_rows)
{
// reduce per-class candidate margins to one best class candidate
// per thread (for each of the NITEMS rows)
auto best = vec<begin->NITEMS, best_margin_label>();
for (int c = threadIdx.x; c < end - begin; c += blockDim.x)
best = vectorized(hipcub::ArgMax())(best, to_vec(c, begin[c]));
// [begin, end) may overlap tmp_storage
__syncthreads();
// find best class per block (for each of the NITEMS rows)
best = block_reduce(best, vectorized(hipcub::ArgMax()), tmp_storage);
// write it out to global memory
if (threadIdx.x > 0) return;
#pragma unroll
for (int row = 0; row < best.NITEMS; ++row)
if (row < num_rows) out[row] = best[row].key;
}
/// needed for softmax
__device__ float shifted_exp(float margin, float max) { return expf(margin - max); }
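// softmax(x)_i = exp(x_i) / sum_j exp(x_j) == exp(x_i - m) / sum_j exp(x_j - m)
// for any constant m; choosing m = max(x) keeps expf() from overflowing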
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
template <typename Iterator>
__device__ __forceinline__ void block_softmax(Iterator begin, Iterator end, void* tmp_storage)
{
// subtract max before exponentiating for numerical stability
typedef typename std::iterator_traits<Iterator>::value_type value_type;
value_type max = allreduce_shmem(begin, end, vectorized(hipcub::Max()), tmp_storage);
for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
*it = vectorized(shifted_exp)(*it, max);
// sum of exponents
value_type soe = allreduce_shmem(begin, end, vectorized(hipcub::Sum()), tmp_storage);
// softmax phase 2: normalization
for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
*it /= soe;
}
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
template <typename Iterator>
__device__ __forceinline__ void normalize_softmax_and_write(Iterator begin,
Iterator end,
output_t transform,
int trees_per_class,
void* tmp_storage,
float* out,
int num_rows)
{
if ((transform & output_t::AVG) != 0) {
for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
*it /= trees_per_class;
}
if ((transform & output_t::SOFTMAX) != 0) block_softmax(begin, end, tmp_storage);
// write result to global memory
#pragma unroll
for (int row = 0; row < begin->NITEMS; ++row) {
for (int c = threadIdx.x; c < end - begin; c += blockDim.x)
if (row < num_rows) out[row * (end - begin) + c] = begin[c][row];
}
}
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
// in case num_outputs > 1
template <typename Iterator>
__device__ __forceinline__ void class_margins_to_global_memory(Iterator begin,
Iterator end,
output_t transform,
int trees_per_class,
void* tmp_storage,
float* out,
int num_rows,
int num_outputs)
{
if (num_outputs == 1) { // will output class
// reduce per-class candidate margins to one best class candidate
// per thread (for each of the NITEMS rows)
write_best_class(begin, end, tmp_storage, out, num_rows);
} else { // output softmax-ed margin
normalize_softmax_and_write(begin, end, transform, trees_per_class, tmp_storage, out, num_rows);
}
}
template <int NITEMS>
struct tree_aggregator_t<NITEMS, GROVE_PER_CLASS_FEW_CLASSES> {
vec<NITEMS, float> acc;
int num_classes;
vec<NITEMS, float>* per_thread;
void* tmp_storage;
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
size_t phase1 = (FIL_TPB - FIL_TPB % num_classes) * sizeof(vec<NITEMS, float>);
size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS>()
: block_reduce_best_class_footprint_host<NITEMS>();
return predict_proba ? phase1 + phase2 : ::max(phase1, phase2);
}
static size_t smem_accumulate_footprint(int num_classes) { return 0; }
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: num_classes(params.num_classes),
per_thread((vec<NITEMS, float>*)finalize_workspace),
tmp_storage(params.predict_proba ? per_thread + num_classes : finalize_workspace)
{
}
__device__ __forceinline__ void accumulate(vec<NITEMS, float> single_tree_prediction,
int tree,
int thread_num_rows)
{
acc += single_tree_prediction;
}
__device__ __forceinline__ void finalize(float* out,
int num_rows,
int num_outputs,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
__syncthreads(); // free up input row in case it was in shared memory
// load margin into shared memory
per_thread[threadIdx.x] = acc;
__syncthreads();
acc = multi_sum<6>(per_thread, num_classes, blockDim.x / num_classes);
if (threadIdx.x < num_classes) per_thread[threadIdx.x] = acc;
__syncthreads(); // per_thread needs to be fully populated
class_margins_to_global_memory(per_thread,
per_thread + num_classes,
transform,
num_trees / num_classes,
tmp_storage,
out,
num_rows,
num_outputs);
}
};
template <int NITEMS>
struct tree_aggregator_t<NITEMS, GROVE_PER_CLASS_MANY_CLASSES> {
vec<NITEMS, float> acc;
/// at first, per class margin, then, possibly, different softmax partials
vec<NITEMS, float>* per_class_margin;
void* tmp_storage;
int num_classes;
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes);
size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS>()
: block_reduce_best_class_footprint_host<NITEMS>();
return predict_proba ? phase1 + phase2 : ::max(phase1, phase2);
}
static __host__ __device__ size_t smem_accumulate_footprint(int num_classes)
{
return num_classes * sizeof(vec<NITEMS, float>);
}
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: per_class_margin((vec<NITEMS, float>*)accumulate_workspace),
tmp_storage(params.predict_proba ? per_class_margin + num_classes : finalize_workspace),
num_classes(params.num_classes)
{
for (int c = threadIdx.x; c < num_classes; c += blockDim.x)
per_class_margin[c] = vec<NITEMS, float>(0);
// __syncthreads() is called in infer_k
}
__device__ __forceinline__ void accumulate(vec<NITEMS, float> single_tree_prediction,
int tree,
int thread_num_rows)
{
// since threads are assigned to consecutive classes, no need for atomics
if (thread_num_rows > 0) { per_class_margin[tree % num_classes] += single_tree_prediction; }
__syncthreads();
}
__device__ __forceinline__ void finalize(float* out,
int num_rows,
int num_outputs,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
class_margins_to_global_memory(per_class_margin,
per_class_margin + num_classes,
transform,
num_trees / num_classes,
tmp_storage,
out,
num_rows,
num_outputs);
}
};
template <int NITEMS>
struct tree_aggregator_t<NITEMS, VECTOR_LEAF> {
// per_class_margin is a row-major matrix
// of size num_threads_per_class * num_classes
  // used to accumulate class values
vec<NITEMS, float>* per_class_margin;
vec<NITEMS, int>* vector_leaf_indices;
int* thread_num_rows;
int num_classes;
int num_threads_per_class;
float* vector_leaf;
void* tmp_storage;
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes);
size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS>()
: block_reduce_best_class_footprint_host<NITEMS>();
return predict_proba ? phase1 + phase2 : ::max(phase1, phase2);
}
static size_t smem_accumulate_footprint(int num_classes)
{
return sizeof(vec<NITEMS, float>) * num_classes * max(1, FIL_TPB / num_classes) +
sizeof(vec<NITEMS, int>) * FIL_TPB + sizeof(int) * FIL_TPB;
}
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: num_classes(params.num_classes),
num_threads_per_class(max(1, blockDim.x / params.num_classes)),
vector_leaf(vector_leaf),
tmp_storage(finalize_workspace)
{
// Assign workspace
char* ptr = (char*)accumulate_workspace;
per_class_margin = (vec<NITEMS, float>*)ptr;
ptr += sizeof(vec<NITEMS, float>) * num_classes * num_threads_per_class;
vector_leaf_indices = (vec<NITEMS, int>*)ptr;
ptr += sizeof(vec<NITEMS, int>) * blockDim.x;
thread_num_rows = (int*)ptr;
// Initialise shared memory
for (int i = threadIdx.x; i < num_classes * num_threads_per_class; i += blockDim.x) {
per_class_margin[i] = vec<NITEMS, float>();
}
vector_leaf_indices[threadIdx.x] = vec<NITEMS, int>();
thread_num_rows[threadIdx.x] = 0;
// __syncthreads() is called in infer_k
}
__device__ __forceinline__ void accumulate(vec<NITEMS, int> single_tree_prediction,
int tree,
int num_rows)
{
// Perform a transpose in shared memory
// Assign each thread to a class, so they can accumulate without atomics
__syncthreads();
// Write indices to shared memory
vector_leaf_indices[threadIdx.x] = single_tree_prediction;
thread_num_rows[threadIdx.x] = num_rows;
__syncthreads();
// i here refers to each element of the matrix per_class_margin
for (int i = threadIdx.x; i < num_classes * num_threads_per_class; i += blockDim.x) {
// if num_threads_per_class == 1, then c == i
int c = i % num_classes;
// iterate over original thread inputs with stride num_threads_per_class
// j is the original thread input
// we have num_classes threads for each j
for (int j = i / num_classes; j < blockDim.x; j += num_threads_per_class) {
for (int item = 0; item < thread_num_rows[j]; ++item) {
float pred = vector_leaf[vector_leaf_indices[j][item] * num_classes + c];
per_class_margin[i][item] += pred;
}
}
}
}
__device__ __forceinline__ void finalize(float* out,
int num_rows,
int num_outputs,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
if (num_classes < blockDim.x) {
__syncthreads();
// Efficient implementation for small number of classes
auto acc = multi_sum<6>(per_class_margin, num_classes, max(1, blockDim.x / num_classes));
if (threadIdx.x < num_classes) per_class_margin[threadIdx.x] = acc;
__syncthreads();
}
class_margins_to_global_memory(per_class_margin,
per_class_margin + num_classes,
transform,
num_trees,
tmp_storage,
out,
num_rows,
num_outputs);
}
};
template <int NITEMS>
struct tree_aggregator_t<NITEMS, CATEGORICAL_LEAF> {
// could switch to uint16_t to save shared memory
  // provided raft::myAtomicAdd(short*) is simulated with appropriate shifts
int* votes;
int num_classes;
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
// not accounting for lingering accumulate_footprint during finalize()
return 0;
}
static size_t smem_accumulate_footprint(int num_classes)
{
return sizeof(int) * num_classes * NITEMS;
}
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: num_classes(params.num_classes), votes((int*)accumulate_workspace)
{
for (int c = threadIdx.x; c < num_classes; c += FIL_TPB * NITEMS)
#pragma unroll
for (int item = 0; item < NITEMS; ++item)
votes[c * NITEMS + item] = 0;
// __syncthreads() is called in infer_k
}
__device__ __forceinline__ void accumulate(vec<NITEMS, int> single_tree_prediction,
int tree,
int thread_num_rows)
{
if (thread_num_rows == 0) return;
#pragma unroll
for (int item = 0; item < NITEMS; ++item) {
raft::myAtomicAdd(votes + single_tree_prediction[item] * NITEMS + item, 1);
}
}
// class probabilities or regression. for regression, num_classes
// is just the number of outputs for each data instance
__device__ __forceinline__ void finalize_multiple_outputs(float* out, int num_rows)
{
__syncthreads();
for (int c = threadIdx.x; c < num_classes; c += blockDim.x) {
#pragma unroll
for (int row = 0; row < num_rows; ++row)
out[row * num_classes + c] = votes[c * NITEMS + row];
}
}
// using this when predicting a single class label, as opposed to sparse class vector
// or class probabilities or regression
__device__ __forceinline__ void finalize_class_label(float* out, int num_rows)
{
__syncthreads(); // make sure all votes[] are final
int item = threadIdx.x;
int row = item;
if (item < NITEMS && row < num_rows) {
int max_votes = 0;
int best_class = 0;
for (int c = 0; c < num_classes; ++c) {
if (votes[c * NITEMS + item] > max_votes) {
max_votes = votes[c * NITEMS + item];
best_class = c;
}
}
out[row] = best_class;
}
}
__device__ __forceinline__ void finalize(float* out,
int num_rows,
int num_outputs,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
if (num_outputs > 1) {
// only supporting num_outputs == num_classes
finalize_multiple_outputs(out, num_rows);
} else {
finalize_class_label(out, num_rows);
}
}
};
template <int NITEMS, leaf_algo_t leaf_algo, bool cols_in_shmem, class storage_type>
__global__ void infer_k(storage_type forest, predict_params params)
{
extern __shared__ char smem[];
float* sdata = (float*)smem;
int sdata_stride = params.sdata_stride();
int rows_per_block = NITEMS << params.log2_threads_per_tree;
int num_cols = params.num_cols;
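  // modpow2(x, k) is x modulo 2^k: the thread's index within its group of
  // threads that share a tree, scaled below by NITEMS rows per thread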
int thread_row0 = NITEMS * modpow2(threadIdx.x, params.log2_threads_per_tree);
for (int64_t block_row0 = blockIdx.x * rows_per_block; block_row0 < params.num_rows;
block_row0 += rows_per_block * gridDim.x) {
int block_num_rows =
max(0, (int)min((int64_t)rows_per_block, (int64_t)params.num_rows - block_row0));
const float* block_input = params.data + block_row0 * num_cols;
if (cols_in_shmem) {
// cache the row for all threads to reuse
      // 2021: even the latest SMs lack the >256KiB of shared memory per block
      // that block_num_rows * num_cols would need to overflow this uint16_t index
#pragma unroll
for (uint16_t input_idx = threadIdx.x; input_idx < block_num_rows * num_cols;
input_idx += blockDim.x) {
// for even num_cols, we need to pad sdata_stride to reduce bank conflicts
// assuming here that sdata_stride == num_cols + 1
// then, idx / num_cols * sdata_stride + idx % num_cols == idx + idx / num_cols
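        // e.g. num_cols == 4, sdata_stride == 5: input_idx 9 (row 2, col 1)
        // lands at sdata_idx 2 * 5 + 1 == 11 == 9 + 9 / 4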
uint16_t sdata_idx =
sdata_stride == num_cols ? input_idx : input_idx + input_idx / (uint16_t)num_cols;
sdata[sdata_idx] = block_input[input_idx];
}
#pragma unroll
for (int idx = block_num_rows * sdata_stride; idx < rows_per_block * sdata_stride;
idx += blockDim.x)
sdata[idx] = 0.0f;
}
tree_aggregator_t<NITEMS, leaf_algo> acc(
params, (char*)sdata + params.cols_shmem_size(), sdata, forest.vector_leaf_);
__syncthreads(); // for both row cache init and acc init
// one block works on NITEMS * threads_per_tree rows and the whole forest
// one thread works on NITEMS rows
int thread_tree0 = threadIdx.x >> params.log2_threads_per_tree;
int tree_stride = blockDim.x >> params.log2_threads_per_tree;
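    // e.g. with blockDim.x == 256 and log2_threads_per_tree == 5, 32 threads
    // share each tree: thread_tree0 == threadIdx.x / 32 and tree_stride == 8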
int thread_num_rows = max(0, min(NITEMS, block_num_rows - thread_row0));
for (int tree = thread_tree0; tree - thread_tree0 < forest.num_trees(); tree += tree_stride) {
/* tree - thread_tree0 < forest.num_trees() is a necessary but block-uniform
       condition for "tree < forest.num_trees()". It lets us use __syncthreads()
and is made exact below.
Same with thread_num_rows > 0
*/
typedef typename leaf_output_t<leaf_algo>::T pred_t;
vec<NITEMS, pred_t> prediction;
if (tree < forest.num_trees() && thread_num_rows != 0) {
prediction = infer_one_tree<NITEMS, pred_t>(
forest[tree],
cols_in_shmem ? sdata + thread_row0 * sdata_stride : block_input + thread_row0 * num_cols,
cols_in_shmem ? sdata_stride : num_cols,
cols_in_shmem ? NITEMS : thread_num_rows);
}
// All threads must enter accumulate
// Dummy threads can be marked as having 0 rows
acc.accumulate(prediction, tree, tree < forest.num_trees() ? thread_num_rows : 0);
}
acc.finalize(params.preds + params.num_outputs * block_row0,
block_num_rows,
params.num_outputs,
params.transform,
forest.num_trees(),
params.log2_threads_per_tree);
__syncthreads(); // free up acc's shared memory resources for next row set
}
}
template <int NITEMS, leaf_algo_t leaf_algo>
size_t shmem_size_params::get_smem_footprint()
{
size_t finalize_footprint = tree_aggregator_t<NITEMS, leaf_algo>::smem_finalize_footprint(
cols_shmem_size(), num_classes, log2_threads_per_tree, predict_proba);
size_t accumulate_footprint =
tree_aggregator_t<NITEMS, leaf_algo>::smem_accumulate_footprint(num_classes) +
cols_shmem_size();
return ::max(accumulate_footprint, finalize_footprint);
}
template <int NITEMS>
size_t shmem_size_params::get_smem_footprint()
{
switch (leaf_algo) {
case FLOAT_UNARY_BINARY: return get_smem_footprint<NITEMS, FLOAT_UNARY_BINARY>();
case CATEGORICAL_LEAF: return get_smem_footprint<NITEMS, CATEGORICAL_LEAF>();
case GROVE_PER_CLASS:
if (num_classes > FIL_TPB) return get_smem_footprint<NITEMS, GROVE_PER_CLASS_MANY_CLASSES>();
return get_smem_footprint<NITEMS, GROVE_PER_CLASS_FEW_CLASSES>();
case VECTOR_LEAF: return get_smem_footprint<NITEMS, VECTOR_LEAF>();
default: ASSERT(false, "internal error: unexpected leaf_algo_t");
}
}
void shmem_size_params::compute_smem_footprint()
{
switch (n_items) {
case 1: shm_sz = get_smem_footprint<1>(); break;
case 2: shm_sz = get_smem_footprint<2>(); break;
case 3: shm_sz = get_smem_footprint<3>(); break;
case 4: shm_sz = get_smem_footprint<4>(); break;
default: ASSERT(false, "internal error: n_items > 4");
}
}
template <leaf_algo_t leaf_algo, bool cols_in_shmem, typename storage_type>
void infer_k_nitems_launcher(storage_type forest,
predict_params params,
hipStream_t stream,
int block_dim_x)
{
switch (params.n_items) {
case 1:
hipLaunchKernelGGL(( infer_k<1, leaf_algo, cols_in_shmem>)
, dim3(params.num_blocks), dim3(block_dim_x), params.shm_sz, stream, forest, params);
break;
case 2:
hipLaunchKernelGGL(( infer_k<2, leaf_algo, cols_in_shmem>)
, dim3(params.num_blocks), dim3(block_dim_x), params.shm_sz, stream, forest, params);
break;
case 3:
hipLaunchKernelGGL(( infer_k<3, leaf_algo, cols_in_shmem>)
, dim3(params.num_blocks), dim3(block_dim_x), params.shm_sz, stream, forest, params);
break;
case 4:
hipLaunchKernelGGL(( infer_k<4, leaf_algo, cols_in_shmem>)
, dim3(params.num_blocks), dim3(block_dim_x), params.shm_sz, stream, forest, params);
break;
default: ASSERT(false, "internal error: nitems > 4");
}
CUDA_CHECK(hipPeekAtLastError());
}
template <leaf_algo_t leaf_algo, typename storage_type>
void infer_k_launcher(storage_type forest,
predict_params params,
hipStream_t stream,
int blockdim_x)
{
params.num_blocks = params.num_blocks != 0 ? params.num_blocks
: raft::ceildiv(int(params.num_rows), params.n_items);
if (params.cols_in_shmem) {
infer_k_nitems_launcher<leaf_algo, true>(forest, params, stream, blockdim_x);
} else {
infer_k_nitems_launcher<leaf_algo, false>(forest, params, stream, blockdim_x);
}
}
template <typename storage_type>
void infer(storage_type forest, predict_params params, hipStream_t stream)
{
switch (params.leaf_algo) {
case FLOAT_UNARY_BINARY:
infer_k_launcher<FLOAT_UNARY_BINARY>(forest, params, stream, FIL_TPB);
break;
case GROVE_PER_CLASS:
if (params.num_classes > FIL_TPB) {
params.leaf_algo = GROVE_PER_CLASS_MANY_CLASSES;
infer_k_launcher<GROVE_PER_CLASS_MANY_CLASSES>(forest, params, stream, FIL_TPB);
} else {
params.leaf_algo = GROVE_PER_CLASS_FEW_CLASSES;
infer_k_launcher<GROVE_PER_CLASS_FEW_CLASSES>(
forest, params, stream, FIL_TPB - FIL_TPB % params.num_classes);
}
break;
case CATEGORICAL_LEAF:
infer_k_launcher<CATEGORICAL_LEAF>(forest, params, stream, FIL_TPB);
break;
case VECTOR_LEAF: infer_k_launcher<VECTOR_LEAF>(forest, params, stream, FIL_TPB); break;
default: ASSERT(false, "internal error: invalid leaf_algo");
}
}
template void infer<dense_storage>(dense_storage forest,
predict_params params,
hipStream_t stream);
template void infer<sparse_storage16>(sparse_storage16 forest,
predict_params params,
hipStream_t stream);
template void infer<sparse_storage8>(sparse_storage8 forest,
predict_params params,
hipStream_t stream);
} // namespace fil
} // namespace ML
| 5a030933cc7e2ac562aea518838595f1ff09e66a.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <algorithm>
#include <cmath>
#include <raft/cudart_utils.h>
#include <thrust/functional.h>
#include <cuml/fil/multi_sum.cuh>
#include <fil/internal.cuh>
#include "common.cuh"
namespace ML {
namespace fil {
// vec wraps float[N] for cub::BlockReduce
template <int N, typename T>
struct vec;
template <typename BinaryOp>
struct Vectorized {
BinaryOp op;
__device__ Vectorized(BinaryOp op_) : op(op_) {}
template <int NITEMS, typename T>
constexpr __host__ __device__ __forceinline__ vec<NITEMS, T> operator()(vec<NITEMS, T> a,
vec<NITEMS, T> b) const
{
vec<NITEMS, T> c;
#pragma unroll
for (int i = 0; i < NITEMS; i++)
c[i] = op(a[i], b[i]);
return c;
}
};
template <typename BinaryOp>
constexpr __host__ __device__ Vectorized<BinaryOp> vectorized(BinaryOp op)
{
return op;
}
template <int N, typename T>
struct vec {
static const int NITEMS = N;
T data[N];
explicit __host__ __device__ vec(T t)
{
#pragma unroll
for (int i = 0; i < N; ++i)
data[i] = t;
}
__host__ __device__ vec() : vec(T()) {}
__host__ __device__ T& operator[](int i) { return data[i]; }
__host__ __device__ T operator[](int i) const { return data[i]; }
friend __host__ __device__ vec<N, T> operator+(const vec<N, T>& a, const vec<N, T>& b)
{
return vectorized(cub::Sum())(a, b);
}
friend __host__ __device__ void operator+=(vec<N, T>& a, const vec<N, T>& b) { a = a + b; }
template <typename Vec>
friend __host__ __device__ vec<N, T> operator/(vec<N, T>& a, const Vec& b)
{
return vectorized(thrust::divides<T>())(a, vec<N, T>(b));
}
template <typename Vec>
friend __host__ __device__ void operator/=(vec<N, T>& a, const Vec& b)
{
a = a / b;
}
};
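// pairs a class index with its margin so that cub::ArgMax (see write_best_class
// below) can pick the highest-margin class per row in one block-wide reduction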
struct best_margin_label : cub::KeyValuePair<int, float> {
__host__ __device__ best_margin_label(cub::KeyValuePair<int, float> pair)
: cub::KeyValuePair<int, float>(pair)
{
}
__host__ __device__ best_margin_label(int c = 0, float f = -INFINITY)
: cub::KeyValuePair<int, float>({c, f})
{
}
};
template <int NITEMS>
__device__ __forceinline__ vec<NITEMS, best_margin_label> to_vec(int c, vec<NITEMS, float> margin)
{
vec<NITEMS, best_margin_label> ret;
#pragma unroll
for (int i = 0; i < NITEMS; ++i)
ret[i] = best_margin_label(c, margin[i]);
return ret;
}
struct ArgMax {
template <int NITEMS>
__host__ __device__ __forceinline__ vec<NITEMS, best_margin_label> operator()(
vec<NITEMS, best_margin_label> a, vec<NITEMS, best_margin_label> b) const
{
vec<NITEMS, best_margin_label> c;
#pragma unroll
for (int i = 0; i < NITEMS; i++)
c[i] = cub::ArgMax()(a[i], b[i]);
return c;
}
};
/** tree_leaf_output returns the leaf outputs from the tree with leaf indices
    given by leaves for n_rows items. FULL_NITEMS indicates whether n_rows ==
NITEMS, to allow the compiler to skip the conditional when unrolling the
loop. */
template <typename output_type, bool FULL_NITEMS, int NITEMS, typename tree_type>
__device__ __forceinline__ vec<NITEMS, output_type> tree_leaf_output(tree_type tree,
int n_rows,
int (&leaves)[NITEMS])
{
vec<NITEMS, output_type> out(0);
#pragma unroll
for (int j = 0; j < NITEMS; ++j) {
if (FULL_NITEMS || j < n_rows) {
/** dependent names are not considered templates by default, unless it's a
member of a current [template] instantiation. As output<>() is a
member function inherited from the base class, template
output<output_type>() is required. */
out[j] = tree[leaves[j]].template output<output_type>();
}
}
return out;
}
template <int NITEMS, typename output_type, typename tree_type>
__device__ __forceinline__ vec<NITEMS, output_type> infer_one_tree(tree_type tree,
const float* input,
int cols,
int n_rows)
{
// find the leaf nodes for each row
int curr[NITEMS];
// the first n_rows are active
int mask = (1 << n_rows) - 1;
for (int j = 0; j < NITEMS; ++j)
curr[j] = 0;
do {
#pragma unroll
for (int j = 0; j < NITEMS; ++j) {
auto n = tree[curr[j]];
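      // retire row j from the active mask once its traversal reaches a leaf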
mask &= ~(n.is_leaf() << j);
if ((mask & (1 << j)) != 0) {
float val = input[j * cols + n.fid()];
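        // NaN (a missing value) follows the node's default direction; otherwise
        // val >= thresh selects the right child (left child index + 1)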
bool cond = isnan(val) ? !n.def_left() : val >= n.thresh();
curr[j] = n.left(curr[j]) + cond;
}
}
} while (mask != 0);
// get the output from the leaves
if (n_rows == NITEMS) {
return tree_leaf_output<output_type, true>(tree, n_rows, curr);
} else {
return tree_leaf_output<output_type, false>(tree, n_rows, curr);
}
}
template <typename output_type, typename tree_type>
__device__ __forceinline__ vec<1, output_type> infer_one_tree(tree_type tree,
const float* input,
int cols,
int rows)
{
int curr = 0;
for (;;) {
auto n = tree[curr];
if (n.is_leaf()) break;
float val = input[n.fid()];
bool cond = isnan(val) ? !n.def_left() : val >= n.thresh();
curr = n.left(curr) + cond;
}
vec<1, output_type> out;
/** dependent names are not considered templates by default,
unless it's a member of a current [template] instantiation.**/
out[0] = tree[curr].template output<output_type>();
return out;
}
/**
The shared memory requirements for finalization stage may differ based
on the set of PTX architectures the kernels were compiled for, as well as
the CUDA compute capability of the device chosen for computation.
TODO (levsnv): run a test kernel during forest init to determine the compute capability
chosen for the inference, for an accurate sizeof(BlockReduce::TempStorage),
which is used in determining max NITEMS or max input data columns.
600 is the __CUDA_ARCH__ for Pascal (6.0) GPUs, which is not defined in
host code.
6.0 is the earliest compute capability supported by FIL and RAPIDS in general.
See https://rapids.ai/start.html as well as cmake defaults.
*/
// values below are defaults as of this change.
template <int NITEMS>
size_t block_reduce_footprint_host()
{
return sizeof(
typename cub::
BlockReduce<vec<NITEMS, float>, FIL_TPB, cub::BLOCK_REDUCE_WARP_REDUCTIONS, 1, 1, 600>::
TempStorage);
}
template <int NITEMS>
size_t block_reduce_best_class_footprint_host()
{
return sizeof(typename cub::BlockReduce<vec<NITEMS, best_margin_label>,
FIL_TPB,
cub::BLOCK_REDUCE_WARP_REDUCTIONS,
1,
1,
600>::TempStorage);
}
// the device template should achieve the best performance, using up-to-date
// CUB defaults
template <typename T, typename BinaryOp>
__device__ __forceinline__ T block_reduce(T value, BinaryOp op, void* storage)
{
typedef cub::BlockReduce<T, FIL_TPB> BlockReduceT;
return BlockReduceT(*(typename BlockReduceT::TempStorage*)storage).Reduce(value, op, blockDim.x);
}
template <int NITEMS,
leaf_algo_t leaf_algo> // = FLOAT_UNARY_BINARY
struct tree_aggregator_t {
vec<NITEMS, float> acc;
void* tmp_storage;
/** shared memory footprint of the accumulator during
the finalization of forest inference kernel, when infer_k output
value is computed.
num_classes is used for other template parameters */
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
return log2_threads_per_tree != 0 ? FIL_TPB * NITEMS * sizeof(float)
: block_reduce_footprint_host<NITEMS>();
}
/** shared memory footprint of the accumulator during
the accumulation of forest inference, when individual trees
are inferred and partial aggregates are accumulated.
num_classes is used for other template parameters */
static size_t smem_accumulate_footprint(int num_classes) { return 0; }
/**
num_classes is used for other template parameters */
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: tmp_storage(finalize_workspace)
{
}
__device__ __forceinline__ void accumulate(vec<NITEMS, float> single_tree_prediction,
int tree,
int thread_num_rows)
{
acc += single_tree_prediction;
}
__device__ __forceinline__ void finalize(float* block_out,
int block_num_rows,
int output_stride,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
if (FIL_TPB != 1 << log2_threads_per_tree) { // anything to reduce?
// ensure input columns can be overwritten (no threads traversing trees)
__syncthreads();
if (log2_threads_per_tree == 0) {
acc = block_reduce(acc, vectorized(cub::Sum()), tmp_storage);
} else {
auto per_thread = (vec<NITEMS, float>*)tmp_storage;
per_thread[threadIdx.x] = acc;
__syncthreads();
// We have two pertinent cases for splitting FIL_TPB == 256 values:
// 1. 2000 columns, which fit few threads/tree in shared memory,
// so ~256 groups. These are the models that will run the slowest.
// multi_sum performance is not sensitive to the radix here.
// 2. 50 columns, so ~32 threads/tree, so ~8 groups. These are the most
// popular.
acc =
multi_sum<5>(per_thread, 1 << log2_threads_per_tree, FIL_TPB >> log2_threads_per_tree);
}
}
if (threadIdx.x * NITEMS >= block_num_rows) return;
#pragma unroll
for (int row = 0; row < NITEMS; ++row) {
int out_preds_i = threadIdx.x * NITEMS + row;
if (out_preds_i < block_num_rows) block_out[out_preds_i * output_stride] = acc[row];
}
}
};
// tmp_storage may overlap shared memory addressed by [begin, end)
// allreduce_shmem ensures no race conditions
template <typename Iterator, typename BinaryOp>
__device__ __forceinline__ auto allreduce_shmem(Iterator begin,
Iterator end,
BinaryOp op,
void* tmp_storage)
{
typedef typename std::iterator_traits<Iterator>::value_type value_type;
value_type thread_partial;
for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
thread_partial = op(thread_partial, *it);
__syncthreads(); // free shared memory [begin, end)
auto res = block_reduce(thread_partial, op, tmp_storage);
// broadcast sum to all threads
__syncthreads(); // free up tmp_storage
if (threadIdx.x == 0) *(value_type*)tmp_storage = res;
__syncthreads();
return *(value_type*)tmp_storage;
}
// *begin and *end shall be struct vec
// tmp_storage may overlap shared memory addressed by [begin, end)
template <typename Iterator>
__device__ __forceinline__ void write_best_class(
Iterator begin, Iterator end, void* tmp_storage, float* out, int num_rows)
{
// reduce per-class candidate margins to one best class candidate
// per thread (for each of the NITEMS rows)
auto best = vec<begin->NITEMS, best_margin_label>();
for (int c = threadIdx.x; c < end - begin; c += blockDim.x)
best = vectorized(cub::ArgMax())(best, to_vec(c, begin[c]));
// [begin, end) may overlap tmp_storage
__syncthreads();
// find best class per block (for each of the NITEMS rows)
best = block_reduce(best, vectorized(cub::ArgMax()), tmp_storage);
// write it out to global memory
if (threadIdx.x > 0) return;
#pragma unroll
for (int row = 0; row < best.NITEMS; ++row)
if (row < num_rows) out[row] = best[row].key;
}
/// needed for softmax
__device__ float shifted_exp(float margin, float max) { return expf(margin - max); }
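// softmax(x)_i = exp(x_i) / sum_j exp(x_j) == exp(x_i - m) / sum_j exp(x_j - m)
// for any constant m; choosing m = max(x) keeps expf() from overflowing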
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
template <typename Iterator>
__device__ __forceinline__ void block_softmax(Iterator begin, Iterator end, void* tmp_storage)
{
// subtract max before exponentiating for numerical stability
typedef typename std::iterator_traits<Iterator>::value_type value_type;
value_type max = allreduce_shmem(begin, end, vectorized(cub::Max()), tmp_storage);
for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
*it = vectorized(shifted_exp)(*it, max);
// sum of exponents
value_type soe = allreduce_shmem(begin, end, vectorized(cub::Sum()), tmp_storage);
// softmax phase 2: normalization
for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
*it /= soe;
}
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
template <typename Iterator>
__device__ __forceinline__ void normalize_softmax_and_write(Iterator begin,
Iterator end,
output_t transform,
int trees_per_class,
void* tmp_storage,
float* out,
int num_rows)
{
if ((transform & output_t::AVG) != 0) {
for (Iterator it = begin + threadIdx.x; it < end; it += blockDim.x)
*it /= trees_per_class;
}
if ((transform & output_t::SOFTMAX) != 0) block_softmax(begin, end, tmp_storage);
// write result to global memory
#pragma unroll
for (int row = 0; row < begin->NITEMS; ++row) {
for (int c = threadIdx.x; c < end - begin; c += blockDim.x)
if (row < num_rows) out[row * (end - begin) + c] = begin[c][row];
}
}
// *begin and *end shall be struct vec
// tmp_storage may NOT overlap shared memory addressed by [begin, end)
// in case num_outputs > 1
template <typename Iterator>
__device__ __forceinline__ void class_margins_to_global_memory(Iterator begin,
Iterator end,
output_t transform,
int trees_per_class,
void* tmp_storage,
float* out,
int num_rows,
int num_outputs)
{
if (num_outputs == 1) { // will output class
// reduce per-class candidate margins to one best class candidate
// per thread (for each of the NITEMS rows)
write_best_class(begin, end, tmp_storage, out, num_rows);
} else { // output softmax-ed margin
normalize_softmax_and_write(begin, end, transform, trees_per_class, tmp_storage, out, num_rows);
}
}
template <int NITEMS>
struct tree_aggregator_t<NITEMS, GROVE_PER_CLASS_FEW_CLASSES> {
vec<NITEMS, float> acc;
int num_classes;
vec<NITEMS, float>* per_thread;
void* tmp_storage;
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
size_t phase1 = (FIL_TPB - FIL_TPB % num_classes) * sizeof(vec<NITEMS, float>);
size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS>()
: block_reduce_best_class_footprint_host<NITEMS>();
return predict_proba ? phase1 + phase2 : std::max(phase1, phase2);
}
static size_t smem_accumulate_footprint(int num_classes) { return 0; }
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: num_classes(params.num_classes),
per_thread((vec<NITEMS, float>*)finalize_workspace),
tmp_storage(params.predict_proba ? per_thread + num_classes : finalize_workspace)
{
}
__device__ __forceinline__ void accumulate(vec<NITEMS, float> single_tree_prediction,
int tree,
int thread_num_rows)
{
acc += single_tree_prediction;
}
__device__ __forceinline__ void finalize(float* out,
int num_rows,
int num_outputs,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
__syncthreads(); // free up input row in case it was in shared memory
// load margin into shared memory
per_thread[threadIdx.x] = acc;
__syncthreads();
acc = multi_sum<6>(per_thread, num_classes, blockDim.x / num_classes);
if (threadIdx.x < num_classes) per_thread[threadIdx.x] = acc;
__syncthreads(); // per_thread needs to be fully populated
class_margins_to_global_memory(per_thread,
per_thread + num_classes,
transform,
num_trees / num_classes,
tmp_storage,
out,
num_rows,
num_outputs);
}
};
template <int NITEMS>
struct tree_aggregator_t<NITEMS, GROVE_PER_CLASS_MANY_CLASSES> {
vec<NITEMS, float> acc;
/// at first, per class margin, then, possibly, different softmax partials
vec<NITEMS, float>* per_class_margin;
void* tmp_storage;
int num_classes;
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes);
size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS>()
: block_reduce_best_class_footprint_host<NITEMS>();
return predict_proba ? phase1 + phase2 : std::max(phase1, phase2);
}
static __host__ __device__ size_t smem_accumulate_footprint(int num_classes)
{
return num_classes * sizeof(vec<NITEMS, float>);
}
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: per_class_margin((vec<NITEMS, float>*)accumulate_workspace),
tmp_storage(params.predict_proba ? per_class_margin + num_classes : finalize_workspace),
num_classes(params.num_classes)
{
for (int c = threadIdx.x; c < num_classes; c += blockDim.x)
per_class_margin[c] = vec<NITEMS, float>(0);
// __syncthreads() is called in infer_k
}
__device__ __forceinline__ void accumulate(vec<NITEMS, float> single_tree_prediction,
int tree,
int thread_num_rows)
{
// since threads are assigned to consecutive classes, no need for atomics
if (thread_num_rows > 0) { per_class_margin[tree % num_classes] += single_tree_prediction; }
__syncthreads();
}
__device__ __forceinline__ void finalize(float* out,
int num_rows,
int num_outputs,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
class_margins_to_global_memory(per_class_margin,
per_class_margin + num_classes,
transform,
num_trees / num_classes,
tmp_storage,
out,
num_rows,
num_outputs);
}
};
template <int NITEMS>
struct tree_aggregator_t<NITEMS, VECTOR_LEAF> {
// per_class_margin is a row-major matrix
// of size num_threads_per_class * num_classes
  // used to accumulate class values
vec<NITEMS, float>* per_class_margin;
vec<NITEMS, int>* vector_leaf_indices;
int* thread_num_rows;
int num_classes;
int num_threads_per_class;
float* vector_leaf;
void* tmp_storage;
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
size_t phase1 = data_row_size + smem_accumulate_footprint(num_classes);
size_t phase2 = predict_proba ? block_reduce_footprint_host<NITEMS>()
: block_reduce_best_class_footprint_host<NITEMS>();
return predict_proba ? phase1 + phase2 : std::max(phase1, phase2);
}
static size_t smem_accumulate_footprint(int num_classes)
{
return sizeof(vec<NITEMS, float>) * num_classes * max(1, FIL_TPB / num_classes) +
sizeof(vec<NITEMS, int>) * FIL_TPB + sizeof(int) * FIL_TPB;
}
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: num_classes(params.num_classes),
num_threads_per_class(max(1, blockDim.x / params.num_classes)),
vector_leaf(vector_leaf),
tmp_storage(finalize_workspace)
{
// Assign workspace
char* ptr = (char*)accumulate_workspace;
per_class_margin = (vec<NITEMS, float>*)ptr;
ptr += sizeof(vec<NITEMS, float>) * num_classes * num_threads_per_class;
vector_leaf_indices = (vec<NITEMS, int>*)ptr;
ptr += sizeof(vec<NITEMS, int>) * blockDim.x;
thread_num_rows = (int*)ptr;
// Initialise shared memory
for (int i = threadIdx.x; i < num_classes * num_threads_per_class; i += blockDim.x) {
per_class_margin[i] = vec<NITEMS, float>();
}
vector_leaf_indices[threadIdx.x] = vec<NITEMS, int>();
thread_num_rows[threadIdx.x] = 0;
// __syncthreads() is called in infer_k
}
__device__ __forceinline__ void accumulate(vec<NITEMS, int> single_tree_prediction,
int tree,
int num_rows)
{
// Perform a transpose in shared memory
// Assign each thread to a class, so they can accumulate without atomics
__syncthreads();
// Write indices to shared memory
vector_leaf_indices[threadIdx.x] = single_tree_prediction;
thread_num_rows[threadIdx.x] = num_rows;
__syncthreads();
// i here refers to each element of the matrix per_class_margin
for (int i = threadIdx.x; i < num_classes * num_threads_per_class; i += blockDim.x) {
// if num_threads_per_class == 1, then c == i
int c = i % num_classes;
// iterate over original thread inputs with stride num_threads_per_class
// j is the original thread input
// we have num_classes threads for each j
for (int j = i / num_classes; j < blockDim.x; j += num_threads_per_class) {
for (int item = 0; item < thread_num_rows[j]; ++item) {
float pred = vector_leaf[vector_leaf_indices[j][item] * num_classes + c];
per_class_margin[i][item] += pred;
}
}
}
}
__device__ __forceinline__ void finalize(float* out,
int num_rows,
int num_outputs,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
if (num_classes < blockDim.x) {
__syncthreads();
// Efficient implementation for small number of classes
auto acc = multi_sum<6>(per_class_margin, num_classes, max(1, blockDim.x / num_classes));
if (threadIdx.x < num_classes) per_class_margin[threadIdx.x] = acc;
__syncthreads();
}
class_margins_to_global_memory(per_class_margin,
per_class_margin + num_classes,
transform,
num_trees,
tmp_storage,
out,
num_rows,
num_outputs);
}
};
template <int NITEMS>
struct tree_aggregator_t<NITEMS, CATEGORICAL_LEAF> {
// could switch to uint16_t to save shared memory
  // provided raft::myAtomicAdd(short*) is simulated with appropriate shifts
int* votes;
int num_classes;
static size_t smem_finalize_footprint(size_t data_row_size,
int num_classes,
int log2_threads_per_tree,
bool predict_proba)
{
// not accounting for lingering accumulate_footprint during finalize()
return 0;
}
static size_t smem_accumulate_footprint(int num_classes)
{
return sizeof(int) * num_classes * NITEMS;
}
__device__ __forceinline__ tree_aggregator_t(predict_params params,
void* accumulate_workspace,
void* finalize_workspace,
float* vector_leaf)
: num_classes(params.num_classes), votes((int*)accumulate_workspace)
{
for (int c = threadIdx.x; c < num_classes; c += FIL_TPB * NITEMS)
#pragma unroll
for (int item = 0; item < NITEMS; ++item)
votes[c * NITEMS + item] = 0;
// __syncthreads() is called in infer_k
}
__device__ __forceinline__ void accumulate(vec<NITEMS, int> single_tree_prediction,
int tree,
int thread_num_rows)
{
if (thread_num_rows == 0) return;
#pragma unroll
for (int item = 0; item < NITEMS; ++item) {
raft::myAtomicAdd(votes + single_tree_prediction[item] * NITEMS + item, 1);
}
}
// class probabilities or regression. for regression, num_classes
// is just the number of outputs for each data instance
__device__ __forceinline__ void finalize_multiple_outputs(float* out, int num_rows)
{
__syncthreads();
for (int c = threadIdx.x; c < num_classes; c += blockDim.x) {
#pragma unroll
for (int row = 0; row < num_rows; ++row)
out[row * num_classes + c] = votes[c * NITEMS + row];
}
}
// using this when predicting a single class label, as opposed to sparse class vector
// or class probabilities or regression
__device__ __forceinline__ void finalize_class_label(float* out, int num_rows)
{
__syncthreads(); // make sure all votes[] are final
int item = threadIdx.x;
int row = item;
if (item < NITEMS && row < num_rows) {
int max_votes = 0;
int best_class = 0;
for (int c = 0; c < num_classes; ++c) {
if (votes[c * NITEMS + item] > max_votes) {
max_votes = votes[c * NITEMS + item];
best_class = c;
}
}
out[row] = best_class;
}
}
__device__ __forceinline__ void finalize(float* out,
int num_rows,
int num_outputs,
output_t transform,
int num_trees,
int log2_threads_per_tree)
{
if (num_outputs > 1) {
// only supporting num_outputs == num_classes
finalize_multiple_outputs(out, num_rows);
} else {
finalize_class_label(out, num_rows);
}
}
};
template <int NITEMS, leaf_algo_t leaf_algo, bool cols_in_shmem, class storage_type>
__global__ void infer_k(storage_type forest, predict_params params)
{
extern __shared__ char smem[];
float* sdata = (float*)smem;
int sdata_stride = params.sdata_stride();
int rows_per_block = NITEMS << params.log2_threads_per_tree;
int num_cols = params.num_cols;
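  // modpow2(x, k) is x modulo 2^k: the thread's index within its group of
  // threads that share a tree, scaled below by NITEMS rows per thread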
int thread_row0 = NITEMS * modpow2(threadIdx.x, params.log2_threads_per_tree);
for (int64_t block_row0 = blockIdx.x * rows_per_block; block_row0 < params.num_rows;
block_row0 += rows_per_block * gridDim.x) {
int block_num_rows =
max(0, (int)min((int64_t)rows_per_block, (int64_t)params.num_rows - block_row0));
const float* block_input = params.data + block_row0 * num_cols;
if (cols_in_shmem) {
// cache the row for all threads to reuse
      // 2021: even the latest SMs lack the >256KiB of shared memory per block
      // that block_num_rows * num_cols would need to overflow this uint16_t index
#pragma unroll
for (uint16_t input_idx = threadIdx.x; input_idx < block_num_rows * num_cols;
input_idx += blockDim.x) {
// for even num_cols, we need to pad sdata_stride to reduce bank conflicts
// assuming here that sdata_stride == num_cols + 1
// then, idx / num_cols * sdata_stride + idx % num_cols == idx + idx / num_cols
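        // e.g. num_cols == 4, sdata_stride == 5: input_idx 9 (row 2, col 1)
        // lands at sdata_idx 2 * 5 + 1 == 11 == 9 + 9 / 4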
uint16_t sdata_idx =
sdata_stride == num_cols ? input_idx : input_idx + input_idx / (uint16_t)num_cols;
sdata[sdata_idx] = block_input[input_idx];
}
#pragma unroll
for (int idx = block_num_rows * sdata_stride; idx < rows_per_block * sdata_stride;
idx += blockDim.x)
sdata[idx] = 0.0f;
}
tree_aggregator_t<NITEMS, leaf_algo> acc(
params, (char*)sdata + params.cols_shmem_size(), sdata, forest.vector_leaf_);
__syncthreads(); // for both row cache init and acc init
// one block works on NITEMS * threads_per_tree rows and the whole forest
// one thread works on NITEMS rows
int thread_tree0 = threadIdx.x >> params.log2_threads_per_tree;
int tree_stride = blockDim.x >> params.log2_threads_per_tree;
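    // e.g. with blockDim.x == 256 and log2_threads_per_tree == 5, 32 threads
    // share each tree: thread_tree0 == threadIdx.x / 32 and tree_stride == 8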
int thread_num_rows = max(0, min(NITEMS, block_num_rows - thread_row0));
for (int tree = thread_tree0; tree - thread_tree0 < forest.num_trees(); tree += tree_stride) {
/* tree - thread_tree0 < forest.num_trees() is a necessary but block-uniform
       condition for "tree < forest.num_trees()". It lets us use __syncthreads()
and is made exact below.
Same with thread_num_rows > 0
*/
typedef typename leaf_output_t<leaf_algo>::T pred_t;
vec<NITEMS, pred_t> prediction;
if (tree < forest.num_trees() && thread_num_rows != 0) {
prediction = infer_one_tree<NITEMS, pred_t>(
forest[tree],
cols_in_shmem ? sdata + thread_row0 * sdata_stride : block_input + thread_row0 * num_cols,
cols_in_shmem ? sdata_stride : num_cols,
cols_in_shmem ? NITEMS : thread_num_rows);
}
// All threads must enter accumulate
// Dummy threads can be marked as having 0 rows
acc.accumulate(prediction, tree, tree < forest.num_trees() ? thread_num_rows : 0);
}
acc.finalize(params.preds + params.num_outputs * block_row0,
block_num_rows,
params.num_outputs,
params.transform,
forest.num_trees(),
params.log2_threads_per_tree);
__syncthreads(); // free up acc's shared memory resources for next row set
}
}
template <int NITEMS, leaf_algo_t leaf_algo>
size_t shmem_size_params::get_smem_footprint()
{
size_t finalize_footprint = tree_aggregator_t<NITEMS, leaf_algo>::smem_finalize_footprint(
cols_shmem_size(), num_classes, log2_threads_per_tree, predict_proba);
size_t accumulate_footprint =
tree_aggregator_t<NITEMS, leaf_algo>::smem_accumulate_footprint(num_classes) +
cols_shmem_size();
return std::max(accumulate_footprint, finalize_footprint);
}
template <int NITEMS>
size_t shmem_size_params::get_smem_footprint()
{
switch (leaf_algo) {
case FLOAT_UNARY_BINARY: return get_smem_footprint<NITEMS, FLOAT_UNARY_BINARY>();
case CATEGORICAL_LEAF: return get_smem_footprint<NITEMS, CATEGORICAL_LEAF>();
case GROVE_PER_CLASS:
if (num_classes > FIL_TPB) return get_smem_footprint<NITEMS, GROVE_PER_CLASS_MANY_CLASSES>();
return get_smem_footprint<NITEMS, GROVE_PER_CLASS_FEW_CLASSES>();
case VECTOR_LEAF: return get_smem_footprint<NITEMS, VECTOR_LEAF>();
default: ASSERT(false, "internal error: unexpected leaf_algo_t");
}
}
void shmem_size_params::compute_smem_footprint()
{
switch (n_items) {
case 1: shm_sz = get_smem_footprint<1>(); break;
case 2: shm_sz = get_smem_footprint<2>(); break;
case 3: shm_sz = get_smem_footprint<3>(); break;
case 4: shm_sz = get_smem_footprint<4>(); break;
default: ASSERT(false, "internal error: n_items > 4");
}
}
template <leaf_algo_t leaf_algo, bool cols_in_shmem, typename storage_type>
void infer_k_nitems_launcher(storage_type forest,
predict_params params,
cudaStream_t stream,
int block_dim_x)
{
switch (params.n_items) {
case 1:
infer_k<1, leaf_algo, cols_in_shmem>
<<<params.num_blocks, block_dim_x, params.shm_sz, stream>>>(forest, params);
break;
case 2:
infer_k<2, leaf_algo, cols_in_shmem>
<<<params.num_blocks, block_dim_x, params.shm_sz, stream>>>(forest, params);
break;
case 3:
infer_k<3, leaf_algo, cols_in_shmem>
<<<params.num_blocks, block_dim_x, params.shm_sz, stream>>>(forest, params);
break;
case 4:
infer_k<4, leaf_algo, cols_in_shmem>
<<<params.num_blocks, block_dim_x, params.shm_sz, stream>>>(forest, params);
break;
default: ASSERT(false, "internal error: nitems > 4");
}
CUDA_CHECK(cudaPeekAtLastError());
}
template <leaf_algo_t leaf_algo, typename storage_type>
void infer_k_launcher(storage_type forest,
predict_params params,
cudaStream_t stream,
int blockdim_x)
{
params.num_blocks = params.num_blocks != 0 ? params.num_blocks
: raft::ceildiv(int(params.num_rows), params.n_items);
if (params.cols_in_shmem) {
infer_k_nitems_launcher<leaf_algo, true>(forest, params, stream, blockdim_x);
} else {
infer_k_nitems_launcher<leaf_algo, false>(forest, params, stream, blockdim_x);
}
}
template <typename storage_type>
void infer(storage_type forest, predict_params params, cudaStream_t stream)
{
switch (params.leaf_algo) {
case FLOAT_UNARY_BINARY:
infer_k_launcher<FLOAT_UNARY_BINARY>(forest, params, stream, FIL_TPB);
break;
case GROVE_PER_CLASS:
if (params.num_classes > FIL_TPB) {
params.leaf_algo = GROVE_PER_CLASS_MANY_CLASSES;
infer_k_launcher<GROVE_PER_CLASS_MANY_CLASSES>(forest, params, stream, FIL_TPB);
} else {
params.leaf_algo = GROVE_PER_CLASS_FEW_CLASSES;
infer_k_launcher<GROVE_PER_CLASS_FEW_CLASSES>(
forest, params, stream, FIL_TPB - FIL_TPB % params.num_classes);
}
break;
case CATEGORICAL_LEAF:
infer_k_launcher<CATEGORICAL_LEAF>(forest, params, stream, FIL_TPB);
break;
case VECTOR_LEAF: infer_k_launcher<VECTOR_LEAF>(forest, params, stream, FIL_TPB); break;
default: ASSERT(false, "internal error: invalid leaf_algo");
}
}
template void infer<dense_storage>(dense_storage forest,
predict_params params,
cudaStream_t stream);
template void infer<sparse_storage16>(sparse_storage16 forest,
predict_params params,
cudaStream_t stream);
template void infer<sparse_storage8>(sparse_storage8 forest,
predict_params params,
cudaStream_t stream);
} // namespace fil
} // namespace ML
|
26e3b18ce6305080516a1070e3183d75bd782416.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float* var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) {
if (comp > (-1.7917E-36f / +1.6161E-35f)) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
var_3[i] = +1.3808E-43f * (+1.4054E-35f - var_4 - var_5);
comp += var_3[i] - (var_6 - log10f(atan2f(cosf(var_7 - var_8 - +1.8063E35f / +0.0f + var_9), atan2f((var_10 * var_11 * var_12 * fmodf((-1.1895E-35f * -1.2946E-21f + var_13), -0.0f)), +1.0473E0f / +1.1172E36f))));
comp = asinf(-1.8887E19f);
comp = (var_14 / (-1.4624E-37f * -1.3550E25f * +0.0f));
if (comp < -1.3526E-42f * atan2f(-1.2584E-41f + fmodf(var_15 / -1.5098E-37f + (-1.7195E5f * (var_16 / var_17)), (var_18 + +1.4954E28f)), logf(-0.0f))) {
comp = -1.0378E-35f * -1.9795E35f / -1.3511E-41f - var_19 * -1.1929E16f;
}
if (comp <= (var_20 * (-1.6826E-35f - var_21))) {
comp = (var_22 / (+0.0f * (var_23 - -1.5127E-42f * -1.6680E15f * -1.2287E-41f)));
}
if (comp > (var_24 - ceilf(-1.8055E-37f * (var_25 + fabsf((+1.5670E-37f / (var_26 * +0.0f - var_27 + (-1.3188E-16f * var_28)))))))) {
float tmp_1 = +0.0f * +1.8601E20f - -1.4860E35f / -1.4571E36f;
comp = tmp_1 + cosf(sinf(+1.3136E35f));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float* tmp_4 = initPointer( atof(argv[4]) );
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29);
hipDeviceSynchronize();
return 0;
}
| 26e3b18ce6305080516a1070e3183d75bd782416.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,float* var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) {
if (comp > (-1.7917E-36f / +1.6161E-35f)) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
var_3[i] = +1.3808E-43f * (+1.4054E-35f - var_4 - var_5);
comp += var_3[i] - (var_6 - log10f(atan2f(cosf(var_7 - var_8 - +1.8063E35f / +0.0f + var_9), atan2f((var_10 * var_11 * var_12 * fmodf((-1.1895E-35f * -1.2946E-21f + var_13), -0.0f)), +1.0473E0f / +1.1172E36f))));
comp = asinf(-1.8887E19f);
comp = (var_14 / (-1.4624E-37f * -1.3550E25f * +0.0f));
if (comp < -1.3526E-42f * atan2f(-1.2584E-41f + fmodf(var_15 / -1.5098E-37f + (-1.7195E5f * (var_16 / var_17)), (var_18 + +1.4954E28f)), logf(-0.0f))) {
comp = -1.0378E-35f * -1.9795E35f / -1.3511E-41f - var_19 * -1.1929E16f;
}
if (comp <= (var_20 * (-1.6826E-35f - var_21))) {
comp = (var_22 / (+0.0f * (var_23 - -1.5127E-42f * -1.6680E15f * -1.2287E-41f)));
}
if (comp > (var_24 - ceilf(-1.8055E-37f * (var_25 + fabsf((+1.5670E-37f / (var_26 * +0.0f - var_27 + (-1.3188E-16f * var_28)))))))) {
float tmp_1 = +0.0f * +1.8601E20f - -1.4860E35f / -1.4571E36f;
comp = tmp_1 + cosf(sinf(+1.3136E35f));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float* tmp_4 = initPointer( atof(argv[4]) );
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29);
cudaDeviceSynchronize();
return 0;
}
|
57a3076c294a51caca0925423337c04c5b634091.hip | // !!! This is a file automatically generated by hipify!!!
// $Id$
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/scatter.h>
#include <thrust/functional.h>
#include "slinktest/src/NodeUtils.hh"
//#include <algorithm>
#include <cstdlib>
#include <asm/types.h>
using namespace std;
#define TIMING_DEETS
// copied from cutil.h
#define CUDA_SAFE_CALL( call) { \
hipError_t err = call; \
if( hipSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
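// Example usage -- wrap any runtime call that returns an error code
// (d_buf, h_buf and nbytes are illustrative names, not defined in this file):
//   CUDA_SAFE_CALL(hipMalloc((void**)&d_buf, nbytes));
//   CUDA_SAFE_CALL(hipMemcpy(d_buf, h_buf, nbytes, hipMemcpyHostToDevice));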
struct testChi2: public thrust::unary_function<unsigned int, int> {
__host__ __device__
int operator()(unsigned int i ) {
unsigned int chi2 = (i&0xFFU);
if ( chi2 < 16 )
return 1;
return 0;
}
};
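// With uniformly random input words, (i & 0xFFU) < 16 accepts an expected
// 16/256 = 6.25% of the stream, so the compacted output below should hold
// roughly nwords/16 elements.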
#include <signal.h>
static int saw_sigint = 0;
void siginthandler(int signal)
{
saw_sigint = 1;
return;
}
int main(int argc, char **argv)
{
char *progname;
progname = strrchr(argv[0], '/');
if ( progname == (char *) NULL )
progname = argv[0];
else
progname++;
extern char *optarg;
int ntries = 1000;
int shift = 16;
  int o; // getopt() returns int; storing it in a char can break the EOF comparison
while (( o = getopt (argc, argv, "dSr:i:s:n:o:hb:e:")) != EOF) {
switch (o) {
case 'n':
ntries = atoi(optarg);
break;
case 's':
shift = atoi(optarg);
break;
}
}
int nwords = (1<<shift);
fprintf(stderr, "Running over %d trials of %d words\n", ntries, nwords);
fprintf(stdout, "# Running over %d trials of %d words\n", ntries, nwords);
signal(SIGINT, siginthandler);
int num_devices, device;
CUDA_SAFE_CALL(hipGetDeviceCount(&num_devices));
if ( num_devices > 1 ) {
int max_multiprocessors = 0, max_device = 0;
hipDeviceProp_t best_prop;
for ( device = 0; device < num_devices; ++device ) {
hipDeviceProp_t properties;
CUDA_SAFE_CALL(hipGetDeviceProperties(&properties, device));
if ( max_multiprocessors < properties.multiProcessorCount ) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
best_prop = properties;
}
}
hipSetDevice(max_device);
printf("# Running on device %d (name %s)\n", max_device, best_prop.name);
}
thrust::host_vector<unsigned int> h_vec(nwords);
// map
// result of chi2 test
// these three vectors are of fixed size in this code.
thrust::device_vector<unsigned int> d_chi2(nwords);
thrust::device_vector<unsigned int> d_map(nwords);
thrust::device_vector<unsigned int> d_vec(nwords);
float tbar = 0.0, tsqbar = 0.0;
int n = 0;
int ndiffs = 0;
// GPU RUNNING STARTS HERE
for ( int ev = 0; ev < ntries && !saw_sigint; ++ev ) {
if ( ev % 50 == 0 ) {
fprintf(stderr, "Step %i\n", ev);
}
__u32 t[11];
memset(&t[0],0, sizeof(__u32)*11);
//printf("%u %u %u\n", t[4], t[5], t[6]);
rdtscl(t[0]);
// generate some random numbers serially, on host
std::generate(h_vec.begin(), h_vec.end(), rand);
rdtscl(t[1]);
// transfer data to the device
d_vec = h_vec;
#ifdef TIMING_DEETS
hipDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[2]);
// chi2 test
thrust::transform(d_vec.begin(), d_vec.end(), d_chi2.begin(), testChi2());
#ifdef TIMING_DEETS
hipDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[3]);
// copy to output start
// prefix scan, then a scatter to output
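    // Worked example with hypothetical flags: for d_chi2 = {1,0,1,1,0} the
    // exclusive scan yields d_map = {0,1,1,2,3}, and scatter_if then writes
    // the three passing elements to output slots 0, 1 and 2 -- the standard
    // scan-then-scatter stream-compaction idiom.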
thrust::exclusive_scan(d_chi2.begin(), d_chi2.end(), d_map.begin());
#ifdef TIMING_DEETS
hipDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[4]);
int nout = thrust::reduce(d_chi2.begin(), d_chi2.end());
#ifdef TIMING_DEETS
hipDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[5]);
thrust::device_vector<unsigned int> d_output(nout);
#ifdef TIMING_DEETS
hipDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[6]);
thrust::scatter_if(d_vec.begin(), d_vec.end(), d_map.begin(), d_chi2.begin(), d_output.begin());
#ifdef TIMING_DEETS
hipDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[7]);
// transfer data back to host
thrust::host_vector<unsigned int> h_out(nout);
thrust::copy(d_output.begin(), d_output.end(), h_out.begin());
//thrust::copy_n(d_output.begin(), nout, h_out.begin());
#ifdef TIMING_DEETS
hipDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[8]);
// copy to output end
// ----------------------------------------
// DEBUG
// ----------------------------------------
// test data
// thrust::host_vector<unsigned int> h_test(nwords), h_test1(nwords), h_test2(nwords);
// thrust::copy(d_map.begin(), d_map.end(), h_test.begin());
// thrust::copy(d_vec.begin(), d_vec.end(), h_test1.begin());
// thrust::copy(d_chi2.begin(), d_chi2.end(), h_test2.begin());
//printf("Event = %d\n", ev);
// for ( int i = 0; i < 120; ++i ) {
// printf("map output %u\t%u\t%08x\t%08x\n", i, h_test[i], h_test1[i], h_test2[i]);
// }
/// END GPU
// fprintf(stderr, "outsize is %i (%5.2f)\n", nout, (1.0*nout/h_vec.size()));
float time_us = tstamp_to_us(t[1], t[8]);
for ( int i = 0; i < 8; ++i ) {
float dt = tstamp_to_us(t[i], t[i+1]);
fprintf(stdout, "%5.2f ", dt);
}
fprintf(stdout, "%5.2f ", time_us);
//fprintf(stdout, "\n");
tsqbar += time_us * time_us;
tbar += time_us;
++n;
// Repeat on CPU
rdtscl(t[9]);
std::vector<unsigned int> output;
for (int i = 0; i < h_vec.size(); ++i ) {
unsigned int chi2 = (h_vec[i] & 0xFFU);
if ( chi2 < 16 ) {
output.push_back(h_vec[i]);
}
}
rdtscl(t[10]);
time_us = tstamp_to_us(t[9], t[10]);
fprintf(stdout, "%5.2f ", time_us);
fprintf(stdout, "\n");
int mismatches = 0;
for ( int i = 0; i < output.size(); ++i ) {
//for ( int i = 0; i < 10; ++i ) {
//fprintf(stderr, "%d\t%08x\t%08x", i, h_out[i], output[i]);
if ( h_out[i] != output[i] ) {
fprintf(stderr, "\t\t-->mismatch");
// // is the wrong data on the device
// for (int j = 0; j < nwords ; ++j ) {
// if (h_out[i] == h_vec[j] ) {
// fprintf(stderr, " correct copy: %08x (%d)", h_test1[j], j);
// }
// }
// // is the right data on the device
// for (int j = 0; j < nwords ; ++j ) {
// if (output[i] == h_test1[j] ) {
// fprintf(stderr, " real exists: %08x (%d)", h_test1[j], j);
// }
// }
++mismatches;
}
//fprintf(stderr,"\n");
}
// fprintf(stderr, "n_out (device) = %d, n_out(host) = %d, mism = %d\n", nout, output.size(),
// mismatches);
ndiffs += mismatches;
}
  float trms = sqrt((tsqbar - (tbar*tbar)/(1.0*n))/(n-1.0)); // sample standard deviation
tbar = tbar/(1.0*n);
printf("# timing: %5.2f +- %5.2f us\n", tbar, trms);
printf("# ndiffs = %d\n", ndiffs);
return 0;
}
| 57a3076c294a51caca0925423337c04c5b634091.cu | // $Id$
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/scatter.h>
#include <thrust/functional.h>
#include "slinktest/src/NodeUtils.hh"
//#include <algorithm>
#include <cstdlib>
#include <asm/types.h>
using namespace std;
#define TIMING_DEETS
// copied from cutil.h
#define CUDA_SAFE_CALL( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
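// Example usage -- wrap any runtime call that returns a cudaError
// (d_buf, h_buf and nbytes are illustrative names, not defined in this file):
//   CUDA_SAFE_CALL(cudaMalloc((void**)&d_buf, nbytes));
//   CUDA_SAFE_CALL(cudaMemcpy(d_buf, h_buf, nbytes, cudaMemcpyHostToDevice));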
struct testChi2: public thrust::unary_function<unsigned int, int> {
__host__ __device__
int operator()(unsigned int i ) {
unsigned int chi2 = (i&0xFFU);
if ( chi2 < 16 )
return 1;
return 0;
}
};
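// With uniformly random input words, (i & 0xFFU) < 16 accepts an expected
// 16/256 = 6.25% of the stream, so the compacted output below should hold
// roughly nwords/16 elements.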
#include <signal.h>
static int saw_sigint = 0;
void siginthandler(int signal)
{
saw_sigint = 1;
return;
}
int main(int argc, char **argv)
{
char *progname;
progname = strrchr(argv[0], '/');
if ( progname == (char *) NULL )
progname = argv[0];
else
progname++;
extern char *optarg;
int ntries = 1000;
int shift = 16;
  int o; // getopt() returns int; storing it in a char can break the EOF comparison
while (( o = getopt (argc, argv, "dSr:i:s:n:o:hb:e:")) != EOF) {
switch (o) {
case 'n':
ntries = atoi(optarg);
break;
case 's':
shift = atoi(optarg);
break;
}
}
int nwords = (1<<shift);
fprintf(stderr, "Running over %d trials of %d words\n", ntries, nwords);
fprintf(stdout, "# Running over %d trials of %d words\n", ntries, nwords);
signal(SIGINT, siginthandler);
int num_devices, device;
CUDA_SAFE_CALL(cudaGetDeviceCount(&num_devices));
if ( num_devices > 1 ) {
int max_multiprocessors = 0, max_device = 0;
cudaDeviceProp best_prop;
for ( device = 0; device < num_devices; ++device ) {
cudaDeviceProp properties;
CUDA_SAFE_CALL(cudaGetDeviceProperties(&properties, device));
if ( max_multiprocessors < properties.multiProcessorCount ) {
max_multiprocessors = properties.multiProcessorCount;
max_device = device;
best_prop = properties;
}
}
cudaSetDevice(max_device);
printf("# Running on device %d (name %s)\n", max_device, best_prop.name);
}
thrust::host_vector<unsigned int> h_vec(nwords);
// map
// result of chi2 test
// these three vectors are of fixed size in this code.
thrust::device_vector<unsigned int> d_chi2(nwords);
thrust::device_vector<unsigned int> d_map(nwords);
thrust::device_vector<unsigned int> d_vec(nwords);
float tbar = 0.0, tsqbar = 0.0;
int n = 0;
int ndiffs = 0;
// GPU RUNNING STARTS HERE
for ( int ev = 0; ev < ntries && !saw_sigint; ++ev ) {
if ( ev % 50 == 0 ) {
fprintf(stderr, "Step %i\n", ev);
}
__u32 t[11];
memset(&t[0],0, sizeof(__u32)*11);
//printf("%u %u %u\n", t[4], t[5], t[6]);
rdtscl(t[0]);
// generate some random numbers serially, on host
std::generate(h_vec.begin(), h_vec.end(), rand);
rdtscl(t[1]);
// transfer data to the device
d_vec = h_vec;
#ifdef TIMING_DEETS
cudaDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[2]);
// chi2 test
thrust::transform(d_vec.begin(), d_vec.end(), d_chi2.begin(), testChi2());
#ifdef TIMING_DEETS
cudaDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[3]);
// copy to output start
// prefix scan, then a scatter to output
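    // Worked example with hypothetical flags: for d_chi2 = {1,0,1,1,0} the
    // exclusive scan yields d_map = {0,1,1,2,3}, and scatter_if then writes
    // the three passing elements to output slots 0, 1 and 2 -- the standard
    // scan-then-scatter stream-compaction idiom.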
thrust::exclusive_scan(d_chi2.begin(), d_chi2.end(), d_map.begin());
#ifdef TIMING_DEETS
cudaDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[4]);
int nout = thrust::reduce(d_chi2.begin(), d_chi2.end());
#ifdef TIMING_DEETS
cudaDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[5]);
thrust::device_vector<unsigned int> d_output(nout);
#ifdef TIMING_DEETS
cudaDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[6]);
thrust::scatter_if(d_vec.begin(), d_vec.end(), d_map.begin(), d_chi2.begin(), d_output.begin());
#ifdef TIMING_DEETS
cudaDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[7]);
// transfer data back to host
thrust::host_vector<unsigned int> h_out(nout);
thrust::copy(d_output.begin(), d_output.end(), h_out.begin());
//thrust::copy_n(d_output.begin(), nout, h_out.begin());
#ifdef TIMING_DEETS
cudaDeviceSynchronize(); // block until kernel is finished
#endif // TIMING_DEETS
rdtscl(t[8]);
// copy to output end
// ----------------------------------------
// DEBUG
// ----------------------------------------
// test data
// thrust::host_vector<unsigned int> h_test(nwords), h_test1(nwords), h_test2(nwords);
// thrust::copy(d_map.begin(), d_map.end(), h_test.begin());
// thrust::copy(d_vec.begin(), d_vec.end(), h_test1.begin());
// thrust::copy(d_chi2.begin(), d_chi2.end(), h_test2.begin());
//printf("Event = %d\n", ev);
// for ( int i = 0; i < 120; ++i ) {
// printf("map output %u\t%u\t%08x\t%08x\n", i, h_test[i], h_test1[i], h_test2[i]);
// }
/// END GPU
// fprintf(stderr, "outsize is %i (%5.2f)\n", nout, (1.0*nout/h_vec.size()));
float time_us = tstamp_to_us(t[1], t[8]);
for ( int i = 0; i < 8; ++i ) {
float dt = tstamp_to_us(t[i], t[i+1]);
fprintf(stdout, "%5.2f ", dt);
}
fprintf(stdout, "%5.2f ", time_us);
//fprintf(stdout, "\n");
tsqbar += time_us * time_us;
tbar += time_us;
++n;
// Repeat on CPU
rdtscl(t[9]);
std::vector<unsigned int> output;
for (int i = 0; i < h_vec.size(); ++i ) {
unsigned int chi2 = (h_vec[i] & 0xFFU);
if ( chi2 < 16 ) {
output.push_back(h_vec[i]);
}
}
rdtscl(t[10]);
time_us = tstamp_to_us(t[9], t[10]);
fprintf(stdout, "%5.2f ", time_us);
fprintf(stdout, "\n");
int mismatches = 0;
for ( int i = 0; i < output.size(); ++i ) {
//for ( int i = 0; i < 10; ++i ) {
//fprintf(stderr, "%d\t%08x\t%08x", i, h_out[i], output[i]);
if ( h_out[i] != output[i] ) {
fprintf(stderr, "\t\t-->mismatch");
// // is the wrong data on the device
// for (int j = 0; j < nwords ; ++j ) {
// if (h_out[i] == h_vec[j] ) {
// fprintf(stderr, " correct copy: %08x (%d)", h_test1[j], j);
// }
// }
// // is the right data on the device
// for (int j = 0; j < nwords ; ++j ) {
// if (output[i] == h_test1[j] ) {
// fprintf(stderr, " real exists: %08x (%d)", h_test1[j], j);
// }
// }
++mismatches;
}
//fprintf(stderr,"\n");
}
// fprintf(stderr, "n_out (device) = %d, n_out(host) = %d, mism = %d\n", nout, output.size(),
// mismatches);
ndiffs += mismatches;
}
  float trms = sqrt((tsqbar - (tbar*tbar)/(1.0*n))/(n-1.0)); // sample standard deviation
tbar = tbar/(1.0*n);
printf("# timing: %5.2f +- %5.2f us\n", tbar, trms);
printf("# ndiffs = %d\n", ndiffs);
return 0;
}
|
bf11700c08fe5a568255143eb5838a70c588445e.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHApply.cuh"
#include "THHHalf.h"
#include "THHNumerics.cuh"
#include "generic/ldg.h"
inline int curGPU() {
int curDev;
THCudaCheck(hipGetDevice(&curDev));
return curDev;
}
// Copy operator for the pointwise apply kernel
template <typename TypeDst, typename TypeSrc>
struct CopyOp {
__device__ __forceinline__ void operator()(TypeDst* dst, TypeSrc* src) {
#if __CUDA_ARCH__ >= 350
*dst = ScalarConvert<TypeSrc, TypeDst>::to(__ldg(src));
#else
*dst = ScalarConvert<TypeSrc, TypeDst>::to(*src);
#endif
}
};
struct CCopyOp {
__device__ __forceinline__ void operator()(ccx* dst, ccx* src) {
#if __CUDA_ARCH__ >= 350
*dst = ScalarConvert<ccx, ccx>::to(__ldg(src));
#else
*dst = ScalarConvert<ccx, ccx>::to(*src);
#endif
}
};
struct ZCopyOp {
__device__ __forceinline__ void operator()(zcx* dst, zcx* src) {
#if __CUDA_ARCH__ >= 350
*dst = ScalarConvert<zcx, zcx>::to(__ldg(src));
#else
*dst = ScalarConvert<zcx, zcx>::to(*src);
#endif
}
};
// Copy for the same type to the same type
template <typename TensorTypeDst, typename TensorTypeSrc>
void
THC_copyTensor(THCState* state, TensorTypeDst* dst, TensorTypeSrc* src) {
ptrdiff_t totalElements = TensorUtils<TensorTypeDst>::getNumElements(state, dst);
THArgCheck(totalElements ==
TensorUtils<TensorTypeSrc>::getNumElements(state, src),
2, "sizes do not match");
if (TensorUtils<TensorTypeDst>::getDims(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is
// contiguous).
// -AND: both tensors have the same type.
bool sameType = isSameType<TensorTypeSrc, TensorTypeDst>();
bool srcContig = TensorUtils<TensorTypeSrc>::isContiguous(state, src);
bool dstContig = TensorUtils<TensorTypeDst>::isContiguous(state, dst);
bool memcpyEligible =
((srcContig && dstContig) || (totalElements == 1)) && sameType;
int srcDev = TensorUtils<TensorTypeSrc>::getDevice(state, src);
int dstDev = TensorUtils<TensorTypeDst>::getDevice(state, dst);
int oldDev = curGPU();
// Try to enable p2p access. This also handles the case srcDev == dstDev.
bool p2pEnabled = THCState_getPeerToPeerAccess(state, srcDev, dstDev);
// We always perform the copy on the source device, using the
// current stream on the source device.
// If the copy is on the default stream, then we fully synchronize
// both src and dst's default streams for completion of the
// copy. We have to explicitly do this for non-contig copies.
// This mimics the behavior of cross-device hipMemcpyAsync on
// the default stream.
// If the copy is not on the default stream, then it is up to the
// user to add needed synchronization on the dst device, since the
// stream on the dst device that wishes to synchronize may not be
// the same index as the one on the src device.
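  // In sketch form: record an event on dst's default stream, make src's
  // default stream wait on that event, perform the copy on src, and then
  // (further below) mirror the barrier so dst waits on src's completion.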
hipStream_t copyStream = THCState_getCurrentStreamOnDevice(state, srcDev);
if (srcDev != dstDev && copyStream == NULL) {
// This is a cross-device copy on the default stream. We perform a
// two-way barrier between both devices' default streams before
// the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are
// handled, so that no one is operating on the dst memory when
// we perform the copy.
// src waits on dst barrier (src already waits on src)
hipEvent_t dstReady;
THCudaCheck(hipSetDevice(dstDev));
THCudaCheck(hipEventCreateWithFlags(&dstReady, hipEventDisableTiming));
THCudaCheck(hipEventRecord(dstReady, NULL));
THCudaCheck(hipSetDevice(srcDev));
THCudaCheck(hipStreamWaitEvent(NULL, dstReady, 0));
THCudaCheck(hipEventDestroy(dstReady));
} else if (srcDev != oldDev) {
THCudaCheck(hipSetDevice(srcDev));
}
// We are now on srcDev
if (memcpyEligible) {
// Perform the copy
THCudaCheck(hipMemcpyAsync(
TensorUtils<TensorTypeDst>::getData(state, dst),
TensorUtils<TensorTypeSrc>::getData(state, src),
totalElements *
sizeof(typename TensorUtils<TensorTypeDst>::DataType),
hipMemcpyDeviceToDevice,
copyStream));
} else {
// Non-contiguous copy or a type-conversion copy
// We avoid creating temporary memory copies if possible.
// If both src and dst are on the same device, or if they are on
// different devices and p2p access is enabled, perform the copy
// by a pointwise copy kernel.
// Otherwise, we'll have to make contiguous (which will in fact
// invoke copy() again), and then perform the copy.
// FIXME: might want to consider only running the pointwise kernel
// if both src and dst innermost dimensions are contiguous. If
// they are not, then taking the hit of the memory allocation/free
// might be worth it to avoid non-coalesced reads or writes.
if (p2pEnabled) {
bool succ =
THC_pointwiseApply2(
state, dst, src,
CopyOp<typename TensorUtils<TensorTypeDst>::DataType,
typename TensorUtils<TensorTypeSrc>::DataType>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
} else {
// GPUs can't access each other directly, but the tensors
// involved are non-contiguous and/or are different types.
// Make sure the src is contiguous and in the same type as dst
THCudaCheck(hipSetDevice(srcDev));
TensorTypeDst* srcContig = NULL;
if (sameType) {
srcContig =
(TensorTypeDst*) // this is actually the same type as src
TensorUtils<TensorTypeSrc>::newContiguous(state, src);
} else {
// Types are different
// Copy into the new format, contiguous, on the source device
srcContig = TensorUtils<TensorTypeDst>::newTensor(state);
TensorUtils<TensorTypeDst>::resizeAs(state, srcContig, dst);
bool succ =
THC_pointwiseApply2(
state, srcContig, src,
CopyOp<typename TensorUtils<TensorTypeDst>::DataType,
typename TensorUtils<TensorTypeSrc>::DataType>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
}
// Make sure the dst is contiguous
THCudaCheck(hipSetDevice(dstDev));
TensorTypeDst* dstContig =
TensorUtils<TensorTypeDst>::newContiguous(state, dst);
// Now, we are ready for a cross-device memcpy of contiguous
// data, of the same layout and type
THCudaCheck(hipSetDevice(srcDev));
THCudaCheck(hipMemcpyAsync(
TensorUtils<TensorTypeDst>::getData(state, dstContig),
TensorUtils<TensorTypeDst>::getData(state, srcContig),
totalElements *
sizeof(typename TensorUtils<TensorTypeDst>::DataType),
hipMemcpyDeviceToDevice,
copyStream));
// We are done with the src
TensorUtils<TensorTypeDst>::free(state, srcContig);
if (dst != dstContig) {
TensorUtils<TensorTypeDst>::freeCopyTo(state, dstContig, dst);
} else {
TensorUtils<TensorTypeDst>::free(state, dstContig);
}
// We're still on srcDev at this point
}
}
if (srcDev != dstDev && copyStream == NULL) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on srcDev, record default stream event
hipEvent_t srcReady;
THCudaCheck(hipEventCreateWithFlags(&srcReady, hipEventDisableTiming));
THCudaCheck(hipEventRecord(srcReady, NULL));
THCudaCheck(hipSetDevice(dstDev));
THCudaCheck(hipStreamWaitEvent(NULL, srcReady, 0));
THCudaCheck(hipEventDestroy(srcReady));
// We are now on dstDev (right above). Restore prior device from dst
if (dstDev != oldDev) {
THCudaCheck(hipSetDevice(oldDev));
}
} else {
// We are still on srcDev. Restore prior device from src
if (srcDev != oldDev) {
THCudaCheck(hipSetDevice(oldDev));
}
}
THCudaCheck(hipGetLastError());
}
#include "generic/THCTensorCopy.cu"
#include "THHGenerateAllTypes.h"
| bf11700c08fe5a568255143eb5838a70c588445e.cu | #include "THCApply.cuh"
#include "THCHalf.h"
#include "THCNumerics.cuh"
#include "generic/ldg.h"
inline int curGPU() {
int curDev;
THCudaCheck(cudaGetDevice(&curDev));
return curDev;
}
// Copy operator for the pointwise apply kernel
template <typename TypeDst, typename TypeSrc>
struct CopyOp {
__device__ __forceinline__ void operator()(TypeDst* dst, TypeSrc* src) {
#if __CUDA_ARCH__ >= 350
*dst = ScalarConvert<TypeSrc, TypeDst>::to(__ldg(src));
#else
*dst = ScalarConvert<TypeSrc, TypeDst>::to(*src);
#endif
}
};
struct CCopyOp {
__device__ __forceinline__ void operator()(ccx* dst, ccx* src) {
#if __CUDA_ARCH__ >= 350
*dst = ScalarConvert<ccx, ccx>::to(__ldg(src));
#else
*dst = ScalarConvert<ccx, ccx>::to(*src);
#endif
}
};
struct ZCopyOp {
__device__ __forceinline__ void operator()(zcx* dst, zcx* src) {
#if __CUDA_ARCH__ >= 350
*dst = ScalarConvert<zcx, zcx>::to(__ldg(src));
#else
*dst = ScalarConvert<zcx, zcx>::to(*src);
#endif
}
};
// Copy for the same type to the same type
template <typename TensorTypeDst, typename TensorTypeSrc>
void
THC_copyTensor(THCState* state, TensorTypeDst* dst, TensorTypeSrc* src) {
ptrdiff_t totalElements = TensorUtils<TensorTypeDst>::getNumElements(state, dst);
THArgCheck(totalElements ==
TensorUtils<TensorTypeSrc>::getNumElements(state, src),
2, "sizes do not match");
if (TensorUtils<TensorTypeDst>::getDims(state, dst) == 0) {
// Zero-dim tensor; copy nothing
return;
}
// We can memcpy the memory if:
// -both tensors are contiguous; or,
// -there is only one element to copy; or,
// -FIXME: if both tensors have matching size and stride arrays, and no
// holes within (in other words, there is some permutation that can be applied
// to the size/strides such that the resulting tensor is
// contiguous).
// -AND: both tensors have the same type.
bool sameType = isSameType<TensorTypeSrc, TensorTypeDst>();
bool srcContig = TensorUtils<TensorTypeSrc>::isContiguous(state, src);
bool dstContig = TensorUtils<TensorTypeDst>::isContiguous(state, dst);
bool memcpyEligible =
((srcContig && dstContig) || (totalElements == 1)) && sameType;
int srcDev = TensorUtils<TensorTypeSrc>::getDevice(state, src);
int dstDev = TensorUtils<TensorTypeDst>::getDevice(state, dst);
int oldDev = curGPU();
// Try to enable p2p access. This also handles the case srcDev == dstDev.
bool p2pEnabled = THCState_getPeerToPeerAccess(state, srcDev, dstDev);
// We always perform the copy on the source device, using the
// current stream on the source device.
// If the copy is on the default stream, then we fully synchronize
// both src and dst's default streams for completion of the
// copy. We have to explicitly do this for non-contig copies.
// This mimics the behavior of cross-device cudaMemcpyAsync on
// the default stream.
// If the copy is not on the default stream, then it is up to the
// user to add needed synchronization on the dst device, since the
// stream on the dst device that wishes to synchronize may not be
// the same index as the one on the src device.
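  // In sketch form: record an event on dst's default stream, make src's
  // default stream wait on that event, perform the copy on src, and then
  // (further below) mirror the barrier so dst waits on src's completion.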
cudaStream_t copyStream = THCState_getCurrentStreamOnDevice(state, srcDev);
if (srcDev != dstDev && copyStream == NULL) {
// This is a cross-device copy on the default stream. We perform a
// two-way barrier between both devices' default streams before
// the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are
// handled, so that no one is operating on the dst memory when
// we perform the copy.
// src waits on dst barrier (src already waits on src)
cudaEvent_t dstReady;
THCudaCheck(cudaSetDevice(dstDev));
THCudaCheck(cudaEventCreateWithFlags(&dstReady, cudaEventDisableTiming));
THCudaCheck(cudaEventRecord(dstReady, NULL));
THCudaCheck(cudaSetDevice(srcDev));
THCudaCheck(cudaStreamWaitEvent(NULL, dstReady, 0));
THCudaCheck(cudaEventDestroy(dstReady));
} else if (srcDev != oldDev) {
THCudaCheck(cudaSetDevice(srcDev));
}
// We are now on srcDev
if (memcpyEligible) {
// Perform the copy
THCudaCheck(cudaMemcpyAsync(
TensorUtils<TensorTypeDst>::getData(state, dst),
TensorUtils<TensorTypeSrc>::getData(state, src),
totalElements *
sizeof(typename TensorUtils<TensorTypeDst>::DataType),
cudaMemcpyDeviceToDevice,
copyStream));
} else {
// Non-contiguous copy or a type-conversion copy
// We avoid creating temporary memory copies if possible.
// If both src and dst are on the same device, or if they are on
// different devices and p2p access is enabled, perform the copy
// by a pointwise copy kernel.
// Otherwise, we'll have to make contiguous (which will in fact
// invoke copy() again), and then perform the copy.
// FIXME: might want to consider only running the pointwise kernel
// if both src and dst innermost dimensions are contiguous. If
// they are not, then taking the hit of the memory allocation/free
// might be worth it to avoid non-coalesced reads or writes.
if (p2pEnabled) {
bool succ =
THC_pointwiseApply2(
state, dst, src,
CopyOp<typename TensorUtils<TensorTypeDst>::DataType,
typename TensorUtils<TensorTypeSrc>::DataType>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
} else {
// GPUs can't access each other directly, but the tensors
// involved are non-contiguous and/or are different types.
// Make sure the src is contiguous and in the same type as dst
THCudaCheck(cudaSetDevice(srcDev));
TensorTypeDst* srcContig = NULL;
if (sameType) {
srcContig =
(TensorTypeDst*) // this is actually the same type as src
TensorUtils<TensorTypeSrc>::newContiguous(state, src);
} else {
// Types are different
// Copy into the new format, contiguous, on the source device
srcContig = TensorUtils<TensorTypeDst>::newTensor(state);
TensorUtils<TensorTypeDst>::resizeAs(state, srcContig, dst);
bool succ =
THC_pointwiseApply2(
state, srcContig, src,
CopyOp<typename TensorUtils<TensorTypeDst>::DataType,
typename TensorUtils<TensorTypeSrc>::DataType>());
THArgCheck(succ, 2, CUTORCH_DIM_WARNING);
}
// Make sure the dst is contiguous
THCudaCheck(cudaSetDevice(dstDev));
TensorTypeDst* dstContig =
TensorUtils<TensorTypeDst>::newContiguous(state, dst);
// Now, we are ready for a cross-device memcpy of contiguous
// data, of the same layout and type
THCudaCheck(cudaSetDevice(srcDev));
THCudaCheck(cudaMemcpyAsync(
TensorUtils<TensorTypeDst>::getData(state, dstContig),
TensorUtils<TensorTypeDst>::getData(state, srcContig),
totalElements *
sizeof(typename TensorUtils<TensorTypeDst>::DataType),
cudaMemcpyDeviceToDevice,
copyStream));
// We are done with the src
TensorUtils<TensorTypeDst>::free(state, srcContig);
if (dst != dstContig) {
TensorUtils<TensorTypeDst>::freeCopyTo(state, dstContig, dst);
} else {
TensorUtils<TensorTypeDst>::free(state, dstContig);
}
// We're still on srcDev at this point
}
}
if (srcDev != dstDev && copyStream == NULL) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on srcDev, record default stream event
cudaEvent_t srcReady;
THCudaCheck(cudaEventCreateWithFlags(&srcReady, cudaEventDisableTiming));
THCudaCheck(cudaEventRecord(srcReady, NULL));
THCudaCheck(cudaSetDevice(dstDev));
THCudaCheck(cudaStreamWaitEvent(NULL, srcReady, 0));
THCudaCheck(cudaEventDestroy(srcReady));
// We are now on dstDev (right above). Restore prior device from dst
if (dstDev != oldDev) {
THCudaCheck(cudaSetDevice(oldDev));
}
} else {
// We are still on srcDev. Restore prior device from src
if (srcDev != oldDev) {
THCudaCheck(cudaSetDevice(oldDev));
}
}
THCudaCheck(cudaGetLastError());
}
#include "generic/THCTensorCopy.cu"
#include "THCGenerateAllTypes.h"
|
177f75048b26583e4062a60b0ccf3f5d3dd830a6.hip | // !!! This is a file automatically generated by hipify!!!
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearRow<short4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, hipStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
| 177f75048b26583e4062a60b0ccf3f5d3dd830a6.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "row_filter.h"
#ifndef OPENCV_TINY_GPU_MODULE
namespace filter
{
template void linearRow<short4, float4>(PtrStepSzb src, PtrStepSzb dst, const float* kernel, int ksize, int anchor, int brd_type, int cc, cudaStream_t stream);
}
#endif
#endif /* CUDA_DISABLER */
|
146c0173d573080da8a9715d635ee3f57bf5495f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include<unistd.h>
#include<cuda_runtime.h>
using namespace std;
#define SUBMATRIX_SIZE 16384
////////////////////////////////////////////////////////////////////////
// Number of histogram bins has to be edited by hand, prior to
// compilation.
////////////////////////////////////////////////////////////////////////
#define DEFAULT_NBINS 254
//#define DEFAULT_NBINS 126
//#define DEFAULT_NBINS 62
//#define DEFAULT_NBINS 30
#define CONV_FACTOR 57.2957795 // 180/pi
int doCalcRaDec(FILE *infile0, FILE *infile1, FILE *outfile, bool silent_on_GPU_testing, float scale_factor, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, int log_binning_flag, bool two_different_files, float conv_factor_angle);
int doCalcMpc(FILE *infile0, FILE *infile1, FILE *outfile, bool silent_on_GPU_testing, float scale_factor, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, int log_binning_flag, bool two_different_files, float conv_factor_angle);
void getDeviceDiagnostics(int tot_Gals, int n_coords);
////////////////////////////////////////////////////////////////////////
// Kernel to calculate angular distances between galaxies and histogram
// the distances.
////////////////////////////////////////////////////////////////////////
__global__ void distance(volatile float *a0, volatile float *d0, volatile float *a1, volatile float *d1, int xind, int yind, int max_xind, int max_yind, volatile int *dev_hist, float hist_min, float hist_max, int nbins, float bin_width, int log_binning=0, bool two_different_files=1, float conv_factor_angle=57.2957795)
{
////////////////////////////////////////////////////////////////////////////
// Idx will keep track of which thread is being calculated within a given
// warp.
////////////////////////////////////////////////////////////////////////////
int idx = blockIdx.x * blockDim.x + threadIdx.x; // This should range to SUBMATRIX_SIZE
idx += xind;
////////////////////////////////////////////////////////////////////////
// Shared memory stuff.
////////////////////////////////////////////////////////////////////////
__shared__ int shared_hist[DEFAULT_NBINS+2];
// Note that we only clear things out for the first thread on each block.
if(threadIdx.x==0)
{
for (int i=0;i<nbins+2;i++)
shared_hist[i] = 0;
}
__syncthreads();
////////////////////////////////////////////////////////////////////////
if (idx<max_xind)
{
int i=0;
float alpha_rad = a0[idx];
float delta0 = d0[idx];
float cos_d0 = cos(delta0);
float sin_d0 = sin(delta0);
float dist;
int bin_index = 0;
float a_diff, sin_a_diff, cos_a_diff;
float cos_d1, sin_d1, numer, denom, mult1, mult2;
float d1_rad;
bool do_calc = 1;
int ymax = yind + SUBMATRIX_SIZE;
if (ymax>max_yind)
{
ymax = max_yind;
}
for(i=yind; i<ymax; i++)
{
if (two_different_files)
{
do_calc = 1;
}
else // Doing the same file
{
if(idx > i)
do_calc=1;
else
do_calc=0;
}
//if(idx > i) ///////// CHECK THIS
if (do_calc)
{
a_diff = a1[i] - alpha_rad;
d1_rad = d1[i];
sin_a_diff = sin(a_diff);
cos_a_diff = cos(a_diff);
sin_d1 = sin(d1_rad);
cos_d1 = cos(d1_rad);
mult1 = cos_d1 * cos_d1 * sin_a_diff * sin_a_diff;
mult2 = cos_d0 * sin_d1 - sin_d0 * cos_d1 * cos_a_diff;
mult2 = mult2 * mult2;
numer = sqrt(mult1 + mult2);
denom = sin_d0 *sin_d1 + cos_d0 * cos_d1 * cos_a_diff;
dist = atan2(numer,denom);
dist *= conv_factor_angle; // Convert to degrees or what have you.
if(dist < hist_min)
bin_index = 0;
else if(dist >= hist_max)
bin_index = nbins + 1;
else
{
if (log_binning==0)
{
bin_index = int((dist-hist_min)/bin_width) + 1;
}
else if (log_binning==1)// log binning
{
bin_index = int((log(dist)-log(hist_min))/bin_width) + 1;
}
else if (log_binning==2)// log 10 binning
{
bin_index = int((log10(dist)-log10(hist_min))/bin_width) + 1;
}
}
atomicAdd(&shared_hist[bin_index],1);
}
}
}
__syncthreads();
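	// Each block now flushes its private shared-memory histogram into its own
	// (nbins+2)-bin slice of dev_hist at offset blockIdx.x*(nbins+2); the host
	// sums these per-block partial histograms after the kernel completes.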
if(threadIdx.x==0)
{
for(int i=0;i<nbins+2;i++)
dev_hist[i+(blockIdx.x*(nbins+2))]=shared_hist[i];
}
}
////////////////////////////////////////////////////////////////////////
// Kernel to calculate angular distances between galaxies and histogram
// the distances.
// Assuming coordinates are already in x,y,z (in Mpc)
////////////////////////////////////////////////////////////////////////
__global__ void distanceMpc(volatile float *x0, volatile float *y0, volatile float *z0, volatile float *x1, volatile float *y1, volatile float *z1, int xind, int yind, int max_xind, int max_yind, volatile int *dev_hist, float hist_min, float hist_max, int nbins, float bin_width, int log_binning=0, bool two_different_files=1, float conv_factor_angle=57.2957795)
{
////////////////////////////////////////////////////////////////////////////
	// idx is this thread's global index within the launch (0..SUBMATRIX_SIZE-1);
	// adding xind below offsets it onto the current submatrix's rows.
////////////////////////////////////////////////////////////////////////////
int idx = blockIdx.x * blockDim.x + threadIdx.x; // This should range to SUBMATRIX_SIZE
idx += xind;
////////////////////////////////////////////////////////////////////////
// Shared memory stuff.
////////////////////////////////////////////////////////////////////////
__shared__ int shared_hist[DEFAULT_NBINS+2];
// Note that we only clear things out for the first thread on each block.
if(threadIdx.x==0)
{
for (int i=0;i<nbins+2;i++)
shared_hist[i] = 0;
}
__syncthreads();
////////////////////////////////////////////////////////////////////////
if (idx<max_xind)
{
int i=0;
float dist, xdiff, ydiff, zdiff;
int bin_index = 0;
bool do_calc = 1;
int ymax = yind + SUBMATRIX_SIZE;
if (ymax>max_yind)
{
ymax = max_yind;
}
for(i=yind; i<ymax; i++)
{
if (two_different_files)
{
do_calc = 1;
}
else // Doing the same file
{
if(idx > i)
do_calc=1;
else
do_calc=0;
}
//if(idx > i) ///////// CHECK THIS
if (do_calc)
{
			// This is a much simpler calculation: the x,y,z coords are already
			// co-moving distances, so the pair separation is plain Euclidean.
			// Note the second galaxy must be indexed by the inner-loop index i.
			xdiff = x0[idx] - x1[i];
			ydiff = y0[idx] - y1[i];
			zdiff = z0[idx] - z1[i];
dist = sqrt( (xdiff*xdiff) + (ydiff*ydiff) + (zdiff*zdiff));
if(dist < hist_min)
bin_index = 0;
else if(dist >= hist_max)
bin_index = nbins + 1;
else
{
if (log_binning==0)
{
bin_index = int((dist-hist_min)/bin_width) + 1;
}
else if (log_binning==1)// log binning
{
bin_index = int((log(dist)-log(hist_min))/bin_width) + 1;
}
else if (log_binning==2)// log 10 binning
{
bin_index = int((log10(dist)-log10(hist_min))/bin_width) + 1;
}
}
atomicAdd(&shared_hist[bin_index],1);
}
}
}
__syncthreads();
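	// Each block now flushes its private shared-memory histogram into its own
	// (nbins+2)-bin slice of dev_hist at offset blockIdx.x*(nbins+2); the host
	// sums these per-block partial histograms after the kernel completes.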
if(threadIdx.x==0)
{
for(int i=0;i<nbins+2;i++)
dev_hist[i+(blockIdx.x*(nbins+2))]=shared_hist[i];
}
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Main
////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Needed for parsing command-line arguments.
extern char *optarg;
extern int optind, optopt, opterr;
int c;
char *outfilename = NULL;
char defaultoutfilename[256];
sprintf(defaultoutfilename,"default_out.dat");
float hist_lower_range = 0.0000001;
float hist_upper_range = 0;
int nbins = DEFAULT_NBINS;
float hist_bin_width = 0.05;
int log_binning_flag = 0; // False
float scale_factor = 1.0; // For if we need to convert input to arcsec or arcmin
float conv_factor_angle = 57.2957795; // 180/pi // For if we need to convert arcdistance to arcsec or arcmin
int radec_input = 1; // are we using ra/dec coords, or x/y/z coords?
bool silent_on_GPU_testing = false;
int cuda_device = 0;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
	while ((c = getopt(argc, argv, "ao:L:l:w:smSd:p")) != -1) { // 'p' included so the Mpc-input flag below is reachable
switch(c) {
case 'L':
printf("L is set\n");
hist_lower_range = atof(optarg);
break;
case 'w':
hist_bin_width = atof(optarg);
printf("Histogram bin width: %f\n",hist_bin_width);
break;
case 'l':
log_binning_flag = atoi(optarg);
printf("Will use log binning.\n");
break;
case 's':
scale_factor = 206264.0; // To convert arcseconds to radians.
conv_factor_angle *= 3600.0; // convert radians to arcseconds.
printf("Reading in values assuming they are arcseconds.\n");
printf("scale_factor: %f\n",scale_factor);
printf("conv_factor_angle: %f\n",conv_factor_angle);
break;
case 'm':
scale_factor = 3437.74677; // To convert arcminutes to radians.
conv_factor_angle *= 60.0; // convert radians to arcminutes.
printf("scale_factor: %f\n",scale_factor);
printf("conv_factor_angle: %f\n",conv_factor_angle);
printf("Reading in values assuming they are arcminutes.\n");
break;
case 'o':
outfilename = optarg;
printf("Output filename is %s\n", outfilename);
break;
case 'd':
			cuda_device = atoi(optarg); // Use this CUDA device.
printf("Will attempt to use CUDA device %d\n",cuda_device);
break;
case 'S':
printf("Silent mode - don't run the GPU test (suppresses some output)\n");
silent_on_GPU_testing = true;
break;
case 'p':
printf("Using input files in Mpc format");
radec_input = 0;
break;
case '?':
printf("unknown arg %c\n", optopt);
break;
}
}
	if (argc - optind < 2)
	{
		printf("\nMust pass in at least two input files on command line!\n");
		printf("\nUsage: %s <cluster_data file> <distances file>\n\n", argv[0]);
exit(1);
}
// Set a default output file name, if none was passed in on the
// command line.
if (outfilename == NULL)
{
outfilename = defaultoutfilename;
printf("Output filename is %s\n", outfilename);
}
float temp_lo = hist_lower_range;
if (hist_upper_range == 0)
{
if (log_binning_flag==0)
{
for (int i=0;i<nbins;i++)
{
hist_upper_range = temp_lo + hist_bin_width;
temp_lo = hist_upper_range;
}
}
else if (log_binning_flag==1)
{
for (int i=0;i<nbins;i++)
{
hist_upper_range = exp(log(temp_lo) + hist_bin_width);
temp_lo = hist_upper_range;
}
}
else if (log_binning_flag==2)
{
for (int i=0;i<nbins;i++)
{
hist_upper_range = pow(10,(log10(temp_lo) + hist_bin_width));
temp_lo = hist_upper_range;
}
}
}
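	// Equivalent closed forms for the loop above: linear binning gives
	// lo + nbins*width; natural-log binning gives lo*exp(nbins*width);
	// log10 binning gives lo*pow(10.0, nbins*width).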
printf("hist_upper_range: %f\n",hist_upper_range);
FILE *infile0, *infile1, *outfile ;
infile0 = fopen(argv[optind],"r");
infile1 = fopen(argv[optind+1],"r");
printf("Opening input file 0: %s\n",argv[optind]);
printf("Opening input file 1: %s\n",argv[optind+1]);
outfile = fopen(outfilename, "w");
////////////////////////////////////////////////////////////////////////////
// Check to see if the two files are actually the same file.
// This is the case for the DD and RR calculations and change slightly
// the exact calculations being performed.
////////////////////////////////////////////////////////////////////////////
bool two_different_files = 1;
if (strcmp(argv[optind],argv[optind+1])==0)
{
two_different_files = 0;
printf("Using the same file!\n");
}
printf("\n");
////////////////////////////////////////////////////////////////////////
// Set the CUDA device. This is useful if your machine has multiple GPUs
// on it.
////////////////////////////////////////////////////////////////////////
hipError_t error_id = hipSetDevice(cuda_device);
if (error_id == hipSuccess) {
printf( "hipSetDevice returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id) );
}
else{
printf( "hipSetDevice failed on Device %d!\n\n",cuda_device);
exit(-1);
}
	int success = 0;
	if (radec_input==1) success = doCalcRaDec(infile0, infile1, outfile, silent_on_GPU_testing, scale_factor, nbins, hist_lower_range, hist_upper_range, hist_bin_width, log_binning_flag, two_different_files, conv_factor_angle);
	else success = doCalcMpc(infile0, infile1, outfile, silent_on_GPU_testing, scale_factor, nbins, hist_lower_range, hist_upper_range, hist_bin_width, log_binning_flag, two_different_files, conv_factor_angle);
	return success;
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Module that runs the calculations for input files in the format ra:dec
////////////////////////////////////////////////////////////////////////
int doCalcRaDec(FILE *infile0, FILE *infile1, FILE *outfile, bool silent_on_GPU_testing, float scale_factor, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, int log_binning_flag, bool two_different_files, float conv_factor_angle){
float *d_alpha0, *d_delta0;
float *h_alpha0, *h_delta0;
float *d_alpha1, *d_delta1;
float *h_alpha1, *h_delta1;
int NUM_GALAXIES0;
int NUM_GALAXIES1;
//////////////////////////////////////////////////////////////////////
// Read in the galaxy files.
////////////////////////////////////////////////////////////////////////////
// Read in the first file
////////////////////////////////////////////////////////////////////////////
fscanf(infile0, "%d", &NUM_GALAXIES0);
int size_of_galaxy_array0 = NUM_GALAXIES0 * sizeof(float);
printf("SIZE 0 # GALAXIES: %d\n",NUM_GALAXIES0);
h_alpha0 = (float*)malloc(size_of_galaxy_array0);
h_delta0 = (float*)malloc(size_of_galaxy_array0);
float temp0, temp1;
for(int i=0; i<NUM_GALAXIES0; i++)
{
fscanf(infile0, "%f %f", &temp0, &temp1);
h_alpha0[i] = temp0/scale_factor;
h_delta0[i] = temp1/scale_factor;
//if (i<10)
//printf("%e %e\n", h_alpha0[i], h_delta0[i]);
}
////////////////////////////////////////////////////////////////////////////
// Read in the second file
////////////////////////////////////////////////////////////////////////////
fscanf(infile1, "%d", &NUM_GALAXIES1);
int size_of_galaxy_array1 = NUM_GALAXIES1 * sizeof(float);
printf("SIZE 1 # GALAXIES: %d\n",NUM_GALAXIES1);
h_alpha1 = (float*)malloc(size_of_galaxy_array1);
h_delta1 = (float*)malloc(size_of_galaxy_array1);
for(int i=0; i<NUM_GALAXIES1; i++)
{
fscanf(infile1, "%f %f", &temp0, &temp1);
h_alpha1[i] = temp0/scale_factor;
h_delta1[i] = temp1/scale_factor;
//if (i<10)
//printf("%e %e\n", h_alpha1[i], h_delta1[i]);
}
//get device diagnostics
if (!silent_on_GPU_testing) getDeviceDiagnostics(NUM_GALAXIES0+NUM_GALAXIES1, 2);
////////////////////////////////////////////////////////////////////////////
// Allocation of histogram
///////////////////////////////////////////////////////////////////////////
int *hist, *dev_hist;
int size_hist = SUBMATRIX_SIZE * (nbins+2);
int size_hist_bytes = size_hist*sizeof(int);
hist = (int*)malloc(size_hist_bytes);
memset(hist, 0, size_hist_bytes);
printf("Size of histogram: %d bytes\n",size_hist_bytes);
hipMalloc((void **) &dev_hist, (size_hist_bytes));
hipMemset(dev_hist, 0, size_hist_bytes);
unsigned long *hist_array;
int hist_array_size = (nbins+2) * sizeof(unsigned long);
hist_array = (unsigned long*)malloc(hist_array_size);
printf("Size of histogram array: %d bytes\n",hist_array_size);
memset(hist_array,0,hist_array_size);
////////////////////////////////////////////////////////////////////////////
// Define the grid and block size
////////////////////////////////////////////////////////////////////////////
dim3 grid, block;
	// Each per-block shared histogram needs (DEFAULT_NBINS+2)*sizeof(int)
	// = 256*4 = 1024 bytes; 8192 ints (32768 bytes) is the budget we allow
	// for all per-block histograms in one launch.
	// With DEFAULT_NBINS = 254: grid.x = 8192/256 = 32 blocks and
	// block.x = 16384/32 = 512 threads per block, so one launch covers
	// SUBMATRIX_SIZE threads in total.
	grid.x = 8192/(DEFAULT_NBINS+2); // number of blocks per launch
	block.x = SUBMATRIX_SIZE/grid.x; // threads per block
////////////////////////////////////////////////////////////////////////////
hipMalloc((void **) &d_alpha0, size_of_galaxy_array0 );
hipMalloc((void **) &d_delta0, size_of_galaxy_array0 );
hipMalloc((void **) &d_alpha1, size_of_galaxy_array1 );
hipMalloc((void **) &d_delta1, size_of_galaxy_array1 );
// Check to see if we allocated enough memory.
if (0==d_alpha0 || 0==d_delta0 || 0==d_alpha1 || 0==d_delta1 || 0==dev_hist)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
hipMemset(d_alpha0,0,size_of_galaxy_array0);
hipMemset(d_delta0,0,size_of_galaxy_array0);
hipMemset(d_alpha1,0,size_of_galaxy_array1);
hipMemset(d_delta1,0,size_of_galaxy_array1);
hipMemcpy(d_alpha0, h_alpha0, size_of_galaxy_array0, hipMemcpyHostToDevice );
hipMemcpy(d_delta0, h_delta0, size_of_galaxy_array0, hipMemcpyHostToDevice );
hipMemcpy(d_alpha1, h_alpha1, size_of_galaxy_array1, hipMemcpyHostToDevice );
hipMemcpy(d_delta1, h_delta1, size_of_galaxy_array1, hipMemcpyHostToDevice );
int x, y;
int num_submatrices_x = NUM_GALAXIES0 / SUBMATRIX_SIZE;
int num_submatrices_y = NUM_GALAXIES1 / SUBMATRIX_SIZE;
// Take care of edges of matrix.
if (NUM_GALAXIES0%SUBMATRIX_SIZE != 0)
{
num_submatrices_x += 1;
}
if (NUM_GALAXIES1%SUBMATRIX_SIZE != 0)
{
num_submatrices_y += 1;
}
printf("Breaking down the calculations.\n");
printf("Number of submatrices: %dx%d\n",num_submatrices_x,num_submatrices_y);
printf("Number of calculations per submatrices: %dx%d\n",SUBMATRIX_SIZE,SUBMATRIX_SIZE);
int bin_index = 0;
for(int k = 0; k < num_submatrices_y; k++)
{
y = k*SUBMATRIX_SIZE;
//printf("%d %d\n",k,y);
for(int j = 0; j < num_submatrices_x; j++)
{
x = j*SUBMATRIX_SIZE;
//printf("----\n");
//printf("%d %d\t\t%d %d\n",k,y,j,x);
//printf("----\n");
// Set the histogram to all zeros each time.
hipMemset(dev_hist,0,size_hist_bytes);
int max_x = NUM_GALAXIES0;
int max_y = NUM_GALAXIES1;
hipLaunchKernelGGL(( distance), dim3(grid),dim3(block), 0, 0, d_alpha0, d_delta0,d_alpha1, d_delta1, x, y, max_x, max_y, dev_hist, hist_lower_range, hist_upper_range, nbins, hist_bin_width, log_binning_flag, two_different_files,conv_factor_angle);
hipMemcpy(hist, dev_hist, size_hist_bytes, hipMemcpyDeviceToHost);
////////////////////////////////////////////////////////////////////
// Sum up the histograms from each thread (hist).
////////////////////////////////////////////////////////////////////
for(int m=0; m<size_hist; m++)
{
bin_index = m%(nbins+2);
hist_array[bin_index] += hist[m];
}
}
}
unsigned long total = 0;
float lo = hist_lower_range;
float hi = 0;
for(int k=0; k<nbins+1; k++)
{
if (k==0)
{
//fprintf(outfile, "Underflow below %.3e %s %lu \n", lo, ",", hist_array[k]);
}
else
{
if (log_binning_flag==0)
{
hi = lo + hist_bin_width;
}
else if (log_binning_flag==1)
{
//printf("lo: %f\t\tlog(lo): %f\n",lo,log(lo));
hi = exp(log(lo) + hist_bin_width);
}
else if (log_binning_flag==2)
{
//printf("lo: %f\t\tlog10(lo): %f\n",lo,log10(lo));
hi = pow(10,(log10(lo) + hist_bin_width));
}
fprintf(outfile, "%.3e %.3e %lu \n",lo,hi,hist_array[k]);
total += hist_array[k];
lo = hi;
}
}
printf("total: %lu \n", total);
fclose(infile0);
fclose(infile1);
fclose(outfile);
free(h_alpha0);
free(h_delta0);
free(h_alpha1);
free(h_delta1);
	free(hist);
	free(hist_array);
hipFree(d_alpha0);
hipFree(d_delta0);
hipFree(d_alpha1);
hipFree(d_delta1);
hipFree(dev_hist);
return 0;
}
//////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Module that runs the calculations for input files in the format x:y:z in Mpc
////////////////////////////////////////////////////////////////////////
int doCalcMpc(FILE *infile0, FILE *infile1, FILE *outfile, bool silent_on_GPU_testing, float scale_factor, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, int log_binning_flag, bool two_different_files, float conv_factor_angle){
float *d_x0, *d_y0, *d_z0;
float *h_x0, *h_y0, *h_z0;
float *d_x1, *d_y1, *d_z1;
float *h_x1, *h_y1, *h_z1;
int NUM_GALAXIES0;
int NUM_GALAXIES1;
//////////////////////////////////////////////////////////////////////
// Read in the galaxy files.
////////////////////////////////////////////////////////////////////////////
// Read in the first file
////////////////////////////////////////////////////////////////////////////
fscanf(infile0, "%d", &NUM_GALAXIES0);
int size_of_galaxy_array0 = NUM_GALAXIES0 * sizeof(float);
printf("SIZE 0 # GALAXIES: %d\n",NUM_GALAXIES0);
h_x0 = (float*)malloc(size_of_galaxy_array0);
h_y0 = (float*)malloc(size_of_galaxy_array0);
h_z0 = (float*)malloc(size_of_galaxy_array0);
float temp0, temp1, temp2;
for(int i=0; i<NUM_GALAXIES0; i++)
{
fscanf(infile0, "%f %f", &temp0, &temp1, &temp2);
h_x0[i] = temp0/scale_factor;
h_y0[i] = temp1/scale_factor;
h_z0[i] = temp2/scale_factor;
//if (i<10)
//printf("%e %e\n", h_x0[i], h_y0[i], h_y0[i],);
}
////////////////////////////////////////////////////////////////////////////
// Read in the second file
////////////////////////////////////////////////////////////////////////////
fscanf(infile1, "%d", &NUM_GALAXIES1);
int size_of_galaxy_array1 = NUM_GALAXIES1 * sizeof(float);
printf("SIZE 1 # GALAXIES: %d\n",NUM_GALAXIES1);
h_x1 = (float*)malloc(size_of_galaxy_array1);
h_y1 = (float*)malloc(size_of_galaxy_array1);
h_z1 = (float*)malloc(size_of_galaxy_array1);
for(int i=0; i<NUM_GALAXIES1; i++)
{
fscanf(infile1, "%f %f", &temp0, &temp1, &temp2);
h_x1[i] = temp0/scale_factor;
h_y1[i] = temp1/scale_factor;
h_z1[i] = temp2/scale_factor;
//if (i<10)
//printf("%e %e\n", h_x1[i], h_y1[i], h_z1[i]);
}
// get device diagnostics
	if (!silent_on_GPU_testing) getDeviceDiagnostics(NUM_GALAXIES0+NUM_GALAXIES1, 3); // three coordinate arrays (x,y,z) per galaxy
////////////////////////////////////////////////////////////////////////////
// Allocation of histogram
///////////////////////////////////////////////////////////////////////////
int *hist, *dev_hist;
int size_hist = SUBMATRIX_SIZE * (nbins+2);
int size_hist_bytes = size_hist*sizeof(int);
hist = (int*)malloc(size_hist_bytes);
memset(hist, 0, size_hist_bytes);
printf("Size of histogram: %d bytes\n",size_hist_bytes);
hipMalloc((void **) &dev_hist, (size_hist_bytes));
hipMemset(dev_hist, 0, size_hist_bytes);
unsigned long *hist_array;
int hist_array_size = (nbins+2) * sizeof(unsigned long);
hist_array = (unsigned long*)malloc(hist_array_size);
printf("Size of histogram array: %d bytes\n",hist_array_size);
memset(hist_array,0,hist_array_size);
////////////////////////////////////////////////////////////////////////////
// Define the grid and block size
////////////////////////////////////////////////////////////////////////////
dim3 grid, block;
	// Each per-block shared histogram needs (DEFAULT_NBINS+2)*sizeof(int)
	// = 256*4 = 1024 bytes; 8192 ints (32768 bytes) is the budget we allow
	// for all per-block histograms in one launch.
	// With DEFAULT_NBINS = 254: grid.x = 8192/256 = 32 blocks and
	// block.x = 16384/32 = 512 threads per block, so one launch covers
	// SUBMATRIX_SIZE threads in total.
	grid.x = 8192/(DEFAULT_NBINS+2); // number of blocks per launch
	block.x = SUBMATRIX_SIZE/grid.x; // threads per block
////////////////////////////////////////////////////////////////////////////
hipMalloc((void **) &d_x0, size_of_galaxy_array0 );
hipMalloc((void **) &d_y0, size_of_galaxy_array0 );
hipMalloc((void **) &d_z0, size_of_galaxy_array0 );
hipMalloc((void **) &d_x1, size_of_galaxy_array1 );
hipMalloc((void **) &d_y1, size_of_galaxy_array1 );
hipMalloc((void **) &d_z1, size_of_galaxy_array1 );
// Check to see if we allocated enough memory.
if (0==d_x0 || 0==d_y0 || 0==d_z0 || 0==d_x1 || 0==d_y1 || 0==d_z1 || 0==dev_hist)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
hipMemset(d_x0,0,size_of_galaxy_array0);
hipMemset(d_y0,0,size_of_galaxy_array0);
hipMemset(d_z0,0,size_of_galaxy_array0);
hipMemset(d_x1,0,size_of_galaxy_array1);
hipMemset(d_y1,0,size_of_galaxy_array1);
hipMemset(d_z1,0,size_of_galaxy_array1);
hipMemcpy(d_x0, h_x0, size_of_galaxy_array0, hipMemcpyHostToDevice );
hipMemcpy(d_y0, h_y0, size_of_galaxy_array0, hipMemcpyHostToDevice );
hipMemcpy(d_z0, h_z0, size_of_galaxy_array0, hipMemcpyHostToDevice );
hipMemcpy(d_x1, h_x1, size_of_galaxy_array1, hipMemcpyHostToDevice );
hipMemcpy(d_y1, h_y1, size_of_galaxy_array1, hipMemcpyHostToDevice );
hipMemcpy(d_z1, h_z1, size_of_galaxy_array1, hipMemcpyHostToDevice );
int x, y;
int num_submatrices_x = NUM_GALAXIES0 / SUBMATRIX_SIZE;
int num_submatrices_y = NUM_GALAXIES1 / SUBMATRIX_SIZE;
// Take care of edges of matrix.
if (NUM_GALAXIES0%SUBMATRIX_SIZE != 0)
{
num_submatrices_x += 1;
}
if (NUM_GALAXIES1%SUBMATRIX_SIZE != 0)
{
num_submatrices_y += 1;
}
printf("Breaking down the calculations.\n");
printf("Number of submatrices: %dx%d\n",num_submatrices_x,num_submatrices_y);
printf("Number of calculations per submatrices: %dx%d\n",SUBMATRIX_SIZE,SUBMATRIX_SIZE);
int bin_index = 0;
for(int k = 0; k < num_submatrices_y; k++)
{
y = k*SUBMATRIX_SIZE;
//printf("%d %d\n",k,y);
for(int j = 0; j < num_submatrices_x; j++)
{
x = j*SUBMATRIX_SIZE;
//printf("----\n");
//printf("%d %d\t\t%d %d\n",k,y,j,x);
//printf("----\n");
// Set the histogram to all zeros each time.
hipMemset(dev_hist,0,size_hist_bytes);
int max_x = NUM_GALAXIES0;
int max_y = NUM_GALAXIES1;
hipLaunchKernelGGL(( distanceMpc), dim3(grid),dim3(block), 0, 0, d_x0, d_y0, d_z0,d_x1, d_y1, d_z1, x, y, max_x, max_y, dev_hist, hist_lower_range, hist_upper_range, nbins, hist_bin_width, log_binning_flag, two_different_files,conv_factor_angle);
hipMemcpy(hist, dev_hist, size_hist_bytes, hipMemcpyDeviceToHost);
////////////////////////////////////////////////////////////////////
// Sum up the histograms from each thread (hist).
////////////////////////////////////////////////////////////////////
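// Layout note: dev_hist holds one (nbins+2)-bin histogram per block, stored
// contiguously (bin 0 = underflow, bin nbins+1 = overflow). Only the first
// grid.x*(nbins+2) entries are ever written; the remainder stays zeroed, so
// the m%(nbins+2) fold below safely collapses every copy into hist_array.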
for(int m=0; m<size_hist; m++)
{
bin_index = m%(nbins+2);
hist_array[bin_index] += hist[m];
}
}
}
unsigned long total = 0;
float lo = hist_lower_range;
float hi = 0;
for(int k=0; k<nbins+1; k++)
{
if (k==0)
{
//fprintf(outfile, "Underflow below %.3e %s %lu \n", lo, ",", hist_array[k]);
}
else
{
if (log_binning_flag==0)
{
hi = lo + hist_bin_width;
}
else if (log_binning_flag==1)
{
//printf("lo: %f\t\tlog(lo): %f\n",lo,log(lo));
hi = exp(log(lo) + hist_bin_width);
}
else if (log_binning_flag==2)
{
//printf("lo: %f\t\tlog10(lo): %f\n",lo,log10(lo));
hi = pow(10,(log10(lo) + hist_bin_width));
}
fprintf(outfile, "%.3e %.3e %lu \n",lo,hi,hist_array[k]);
total += hist_array[k];
lo = hi;
}
}
printf("total: %lu \n", total);
fclose(infile0);
fclose(infile1);
fclose(outfile);
free(h_x0);
free(h_y0);
free(h_z0);
free(h_x1);
free(h_y1);
free(h_z1);
free(hist);
hipFree(d_x0);
hipFree(d_y0);
hipFree(d_z0);
hipFree(d_x1);
hipFree(d_y1);
hipFree(d_z1);
hipFree(dev_hist);
return 0;
}
//////////////////////////////////////////////////////////////////////
void getDeviceDiagnostics(int tot_gals, int n_coords){
////////////////////////////////////////////////////////////////////////////
// Now get the info from the device.
////////////////////////////////////////////////////////////////////////////
printf("\n------ CUDA device diagnostics ------\n\n");
int nx = SUBMATRIX_SIZE;
int ncalc = nx * nx;
int gpu_mem_needed = int(tot_gals * sizeof(float)) * n_coords; // one float per coordinate per galaxy (ra/dec or x/y/z).
printf("Requirements: %d calculations and %d bytes memory on the GPU \n\n", ncalc, gpu_mem_needed);
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
printf( "hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id) );
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
else
printf("Found %d CUDA Capable device(s)\n", deviceCount);
int dev=0;
for (dev = 0; dev < deviceCount; ++dev) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
printf(" Warp size: %d\n", deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
// does this device have enough capacity for the calculation?
printf("\n*************\n");
// check memory
if((unsigned long long) deviceProp.totalGlobalMem < gpu_mem_needed) printf(" FAILURE: Not enough memory on device for this calculation! \n");
else
{
printf("Hurrah! This device has enough memory to perform this calculation\n");
// check # threads
int threadsPerBlock = deviceProp.maxThreadsPerBlock; // maximal efficiency exists if we use max # threads per block.
int blocksPerGrid = int(ceil(ncalc / float(threadsPerBlock))); // need nx*nx threads total; float cast avoids truncating integer division
if(deviceProp.maxGridSize[0] < blocksPerGrid) printf("FAILURE: Not enough blocks available on the device to do this calculation!\n");
else
{
printf("Hurrah! This device supports enough threads to do this calculation\n");
// how many kernels can we run at once on this machine?
int n_mem = floor(deviceProp.totalGlobalMem / float(gpu_mem_needed));
int n_threads = floor(threadsPerBlock * deviceProp.maxThreadsDim[0]*deviceProp.maxThreadsDim[1] / float(ncalc) ); // max # threads possible?
printf("%d %d \n", n_threads, deviceProp.maxThreadsDim[0]);
int max_kernels = 0;
n_mem<n_threads ? max_kernels = n_mem : max_kernels = n_threads;
printf(" you can run %d kernels at a time on this device without overloading the resources \n", max_kernels);
}
}
}
printf("\n------ End CUDA device diagnostics ------\n\n");
}
////////////////////////////////////////////////////////////////////////////
| 146c0173d573080da8a9715d635ee3f57bf5495f.cu | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include<unistd.h>
#include<cuda_runtime.h>
using namespace std;
#define SUBMATRIX_SIZE 16384
////////////////////////////////////////////////////////////////////////
// Number of histogram bins has to be edited by hand, prior to
// compilation.
////////////////////////////////////////////////////////////////////////
#define DEFAULT_NBINS 254
//#define DEFAULT_NBINS 126
//#define DEFAULT_NBINS 62
//#define DEFAULT_NBINS 30
#define CONV_FACTOR 57.2957795 // 180/pi
int doCalcRaDec(FILE *infile0, FILE *infile1, FILE *outfile, bool silent_on_GPU_testing, float scale_factor, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, int log_binning_flag, bool two_different_files, float conv_factor_angle);
int doCalcMpc(FILE *infile0, FILE *infile1, FILE *outfile, bool silent_on_GPU_testing, float scale_factor, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, int log_binning_flag, bool two_different_files, float conv_factor_angle);
void getDeviceDiagnostics(int tot_Gals, int n_coords);
////////////////////////////////////////////////////////////////////////
// Kernel to calculate angular distances between galaxies and histogram
// the distances.
////////////////////////////////////////////////////////////////////////
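// Implementation note: the atan2 expression in the loop below is the Vincenty
// formula for the angular separation of two (ra, dec) points on a sphere; it
// stays numerically stable for very small and near-antipodal separations,
// where the simpler acos form loses precision.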
__global__ void distance(volatile float *a0, volatile float *d0, volatile float *a1, volatile float *d1, int xind, int yind, int max_xind, int max_yind, volatile int *dev_hist, float hist_min, float hist_max, int nbins, float bin_width, int log_binning=0, bool two_different_files=1, float conv_factor_angle=57.2957795)
{
////////////////////////////////////////////////////////////////////////////
// Idx will keep track of which thread is being calculated within a given
// warp.
////////////////////////////////////////////////////////////////////////////
int idx = blockIdx.x * blockDim.x + threadIdx.x; // This should range to SUBMATRIX_SIZE
idx += xind;
////////////////////////////////////////////////////////////////////////
// Shared memory stuff.
////////////////////////////////////////////////////////////////////////
__shared__ int shared_hist[DEFAULT_NBINS+2];
// Note that we only clear things out for the first thread on each block.
if(threadIdx.x==0)
{
for (int i=0;i<nbins+2;i++)
shared_hist[i] = 0;
}
__syncthreads();
////////////////////////////////////////////////////////////////////////
if (idx<max_xind)
{
int i=0;
float alpha_rad = a0[idx];
float delta0 = d0[idx];
float cos_d0 = cos(delta0);
float sin_d0 = sin(delta0);
float dist;
int bin_index = 0;
float a_diff, sin_a_diff, cos_a_diff;
float cos_d1, sin_d1, numer, denom, mult1, mult2;
float d1_rad;
bool do_calc = 1;
int ymax = yind + SUBMATRIX_SIZE;
if (ymax>max_yind)
{
ymax = max_yind;
}
for(i=yind; i<ymax; i++)
{
if (two_different_files)
{
do_calc = 1;
}
else // Doing the same file
{
if(idx > i)
do_calc=1;
else
do_calc=0;
}
//if(idx > i) ///////// CHECK THIS
if (do_calc)
{
a_diff = a1[i] - alpha_rad;
d1_rad = d1[i];
sin_a_diff = sin(a_diff);
cos_a_diff = cos(a_diff);
sin_d1 = sin(d1_rad);
cos_d1 = cos(d1_rad);
mult1 = cos_d1 * cos_d1 * sin_a_diff * sin_a_diff;
mult2 = cos_d0 * sin_d1 - sin_d0 * cos_d1 * cos_a_diff;
mult2 = mult2 * mult2;
numer = sqrt(mult1 + mult2);
denom = sin_d0 *sin_d1 + cos_d0 * cos_d1 * cos_a_diff;
dist = atan2(numer,denom);
dist *= conv_factor_angle; // Convert to degrees or what have you.
if(dist < hist_min)
bin_index = 0;
else if(dist >= hist_max)
bin_index = nbins + 1;
else
{
if (log_binning==0)
{
bin_index = int((dist-hist_min)/bin_width) + 1;
}
else if (log_binning==1)// log binning
{
bin_index = int((log(dist)-log(hist_min))/bin_width) + 1;
}
else if (log_binning==2)// log 10 binning
{
bin_index = int((log10(dist)-log10(hist_min))/bin_width) + 1;
}
}
atomicAdd(&shared_hist[bin_index],1);
}
}
}
__syncthreads();
if(threadIdx.x==0)
{
for(int i=0;i<nbins+2;i++)
dev_hist[i+(blockIdx.x*(nbins+2))]=shared_hist[i];
}
}
////////////////////////////////////////////////////////////////////////
// Kernel to calculate angular distances between galaxies and histogram
// the distances.
// Assuming coordinates are already in x,y,z (in Mpc)
////////////////////////////////////////////////////////////////////////
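// Implementation note: with comoving x,y,z inputs the separation is plain
// Euclidean distance; the conv_factor_angle parameter is kept only so the
// signature matches distance() and is never applied in this kernel.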
__global__ void distanceMpc(volatile float *x0, volatile float *y0, volatile float *z0, volatile float *x1, volatile float *y1, volatile float *z1, int xind, int yind, int max_xind, int max_yind, volatile int *dev_hist, float hist_min, float hist_max, int nbins, float bin_width, int log_binning=0, bool two_different_files=1, float conv_factor_angle=57.2957795)
{
////////////////////////////////////////////////////////////////////////////
// Idx will keep track of which thread is being calculated within a given
// warp.
////////////////////////////////////////////////////////////////////////////
int idx = blockIdx.x * blockDim.x + threadIdx.x; // This should range to SUBMATRIX_SIZE
idx += xind;
////////////////////////////////////////////////////////////////////////
// Shared memory stuff.
////////////////////////////////////////////////////////////////////////
__shared__ int shared_hist[DEFAULT_NBINS+2];
// Note that we only clear things out for the first thread on each block.
if(threadIdx.x==0)
{
for (int i=0;i<nbins+2;i++)
shared_hist[i] = 0;
}
__syncthreads();
////////////////////////////////////////////////////////////////////////
if (idx<max_xind)
{
int i=0;
float dist, xdiff, ydiff, zdiff;
int bin_index = 0;
bool do_calc = 1;
int ymax = yind + SUBMATRIX_SIZE;
if (ymax>max_yind)
{
ymax = max_yind;
}
for(i=yind; i<ymax; i++)
{
if (two_different_files)
{
do_calc = 1;
}
else // Doing the same file
{
if(idx > i)
do_calc=1;
else
do_calc=0;
}
//if(idx > i) ///////// CHECK THIS
if (do_calc)
{
// This is a much simpler calculation: the x,y,z coords are already co-moving
// distances, so we can take the Euclidean distance directly.
xdiff = x0[idx] - x1[i]; // index set 1 with i, not idx: pair galaxy idx (set 0) against galaxy i (set 1)
ydiff = y0[idx] - y1[i];
zdiff = z0[idx] - z1[i];
dist = sqrt( (xdiff*xdiff) + (ydiff*ydiff) + (zdiff*zdiff));
if(dist < hist_min)
bin_index = 0;
else if(dist >= hist_max)
bin_index = nbins + 1;
else
{
if (log_binning==0)
{
bin_index = int((dist-hist_min)/bin_width) + 1;
}
else if (log_binning==1)// log binning
{
bin_index = int((log(dist)-log(hist_min))/bin_width) + 1;
}
else if (log_binning==2)// log 10 binning
{
bin_index = int((log10(dist)-log10(hist_min))/bin_width) + 1;
}
}
atomicAdd(&shared_hist[bin_index],1);
}
}
}
__syncthreads();
if(threadIdx.x==0)
{
for(int i=0;i<nbins+2;i++)
dev_hist[i+(blockIdx.x*(nbins+2))]=shared_hist[i];
}
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Main
////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
// Needed for parsing command-line arguments.
extern char *optarg;
extern int optind, optopt, opterr;
int c;
char *outfilename = NULL;
char defaultoutfilename[256];
sprintf(defaultoutfilename,"default_out.dat");
float hist_lower_range = 0.0000001;
float hist_upper_range = 0;
int nbins = DEFAULT_NBINS;
float hist_bin_width = 0.05;
int log_binning_flag = 0; // False
float scale_factor = 1.0; // For if we need to convert input to arcsec or arcmin
float conv_factor_angle = 57.2957795; // 180/pi // For if we need to convert arcdistance to arcsec or arcmin
int radec_input = 1; // are we using ra/dec coords, or x/y/z coords?
bool silent_on_GPU_testing = false;
int cuda_device = 0;
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
while ((c = getopt(argc, argv, "ao:L:l:w:smSd:p")) != -1) { // 'p' added so the Mpc-input case below is reachable
switch(c) {
case 'L':
printf("L is set\n");
hist_lower_range = atof(optarg);
break;
case 'w':
hist_bin_width = atof(optarg);
printf("Histogram bin width: %f\n",hist_bin_width);
break;
case 'l':
log_binning_flag = atoi(optarg);
printf("Will use log binning.\n");
break;
case 's':
scale_factor = 206264.0; // To convert arcseconds to radians.
conv_factor_angle *= 3600.0; // convert radians to arcseconds.
printf("Reading in values assuming they are arcseconds.\n");
printf("scale_factor: %f\n",scale_factor);
printf("conv_factor_angle: %f\n",conv_factor_angle);
break;
case 'm':
scale_factor = 3437.74677; // To convert arcminutes to radians.
conv_factor_angle *= 60.0; // convert radians to arcminutes.
printf("scale_factor: %f\n",scale_factor);
printf("conv_factor_angle: %f\n",conv_factor_angle);
printf("Reading in values assuming they are arcminutes.\n");
break;
case 'o':
outfilename = optarg;
printf("Output filename is %s\n", outfilename);
break;
case 'd':
cuda_device = atoi(optarg); // Use this CUDA device.
printf("Will attempt to use CUDA device %d\n",cuda_device);
break;
case 'S':
printf("Silent mode - don't run the GPU test (suppresses some output)\n");
silent_on_GPU_testing = true;
break;
case 'p':
printf("Using input files in Mpc format");
radec_input = 0;
break;
case '?':
printf("unknown arg %c\n", optopt);
break;
}
}
if (argc < 2)
{
printf("\nMust pass in at least two input files on command line!\n");
printf("\nUsage: ", argv[0] );
//printf(" <cluster_data file> <distances file> \n\n");
exit(1);
}
// Set a default output file name, if none was passed in on the
// command line.
if (outfilename == NULL)
{
outfilename = defaultoutfilename;
printf("Output filename is %s\n", outfilename);
}
float temp_lo = hist_lower_range;
if (hist_upper_range == 0)
{
if (log_binning_flag==0)
{
for (int i=0;i<nbins;i++)
{
hist_upper_range = temp_lo + hist_bin_width;
temp_lo = hist_upper_range;
}
}
else if (log_binning_flag==1)
{
for (int i=0;i<nbins;i++)
{
hist_upper_range = exp(log(temp_lo) + hist_bin_width);
temp_lo = hist_upper_range;
}
}
else if (log_binning_flag==2)
{
for (int i=0;i<nbins;i++)
{
hist_upper_range = pow(10,(log10(temp_lo) + hist_bin_width));
temp_lo = hist_upper_range;
}
}
}
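// Equivalent closed forms for the loops above (worked consistency check):
// linear bins: upper = lower + nbins*width
// ln bins:     upper = lower * exp(nbins*width)
// log10 bins:  upper = lower * pow(10, nbins*width)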
printf("hist_upper_range: %f\n",hist_upper_range);
FILE *infile0, *infile1, *outfile ;
infile0 = fopen(argv[optind],"r");
infile1 = fopen(argv[optind+1],"r");
printf("Opening input file 0: %s\n",argv[optind]);
printf("Opening input file 1: %s\n",argv[optind+1]);
outfile = fopen(outfilename, "w");
////////////////////////////////////////////////////////////////////////////
// Check to see if the two files are actually the same file.
// This is the case for the DD and RR calculations and change slightly
// the exact calculations being performed.
////////////////////////////////////////////////////////////////////////////
bool two_different_files = 1;
if (strcmp(argv[optind],argv[optind+1])==0)
{
two_different_files = 0;
printf("Using the same file!\n");
}
printf("\n");
////////////////////////////////////////////////////////////////////////
// Set the CUDA device. This is useful if your machine has multiple GPUs
// on it.
////////////////////////////////////////////////////////////////////////
cudaError_t error_id = cudaSetDevice(cuda_device);
if (error_id == cudaSuccess) {
printf( "cudaSetDevice returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) );
}
else{
printf( "cudaSetDevice failed on Device %d!\n\n",cuda_device);
exit(-1);
}
if(radec_input==1) return doCalcRaDec(infile0, infile1, outfile, silent_on_GPU_testing, scale_factor, nbins, hist_lower_range, hist_upper_range, hist_bin_width, log_binning_flag, two_different_files, conv_factor_angle);
else return doCalcMpc(infile0, infile1, outfile, silent_on_GPU_testing, scale_factor, nbins, hist_lower_range, hist_upper_range, hist_bin_width, log_binning_flag, two_different_files, conv_factor_angle);
}
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Module that runs the calculations for input files in the format ra:dec
////////////////////////////////////////////////////////////////////////
int doCalcRaDec(FILE *infile0, FILE *infile1, FILE *outfile, bool silent_on_GPU_testing, float scale_factor, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, int log_binning_flag, bool two_different_files, float conv_factor_angle){
float *d_alpha0, *d_delta0;
float *h_alpha0, *h_delta0;
float *d_alpha1, *d_delta1;
float *h_alpha1, *h_delta1;
int NUM_GALAXIES0;
int NUM_GALAXIES1;
//////////////////////////////////////////////////////////////////////
// Read in the galaxy files.
////////////////////////////////////////////////////////////////////////////
// Read in the first file
////////////////////////////////////////////////////////////////////////////
fscanf(infile0, "%d", &NUM_GALAXIES0);
int size_of_galaxy_array0 = NUM_GALAXIES0 * sizeof(float);
printf("SIZE 0 # GALAXIES: %d\n",NUM_GALAXIES0);
h_alpha0 = (float*)malloc(size_of_galaxy_array0);
h_delta0 = (float*)malloc(size_of_galaxy_array0);
float temp0, temp1;
for(int i=0; i<NUM_GALAXIES0; i++)
{
fscanf(infile0, "%f %f", &temp0, &temp1);
h_alpha0[i] = temp0/scale_factor;
h_delta0[i] = temp1/scale_factor;
//if (i<10)
//printf("%e %e\n", h_alpha0[i], h_delta0[i]);
}
////////////////////////////////////////////////////////////////////////////
// Read in the second file
////////////////////////////////////////////////////////////////////////////
fscanf(infile1, "%d", &NUM_GALAXIES1);
int size_of_galaxy_array1 = NUM_GALAXIES1 * sizeof(float);
printf("SIZE 1 # GALAXIES: %d\n",NUM_GALAXIES1);
h_alpha1 = (float*)malloc(size_of_galaxy_array1);
h_delta1 = (float*)malloc(size_of_galaxy_array1);
for(int i=0; i<NUM_GALAXIES1; i++)
{
fscanf(infile1, "%f %f", &temp0, &temp1);
h_alpha1[i] = temp0/scale_factor;
h_delta1[i] = temp1/scale_factor;
//if (i<10)
//printf("%e %e\n", h_alpha1[i], h_delta1[i]);
}
//get device diagnostics
if (!silent_on_GPU_testing) getDeviceDiagnostics(NUM_GALAXIES0+NUM_GALAXIES1, 2);
////////////////////////////////////////////////////////////////////////////
// Allocation of histogram
///////////////////////////////////////////////////////////////////////////
int *hist, *dev_hist;
int size_hist = SUBMATRIX_SIZE * (nbins+2);
int size_hist_bytes = size_hist*sizeof(int);
hist = (int*)malloc(size_hist_bytes);
memset(hist, 0, size_hist_bytes);
printf("Size of histogram: %d bytes\n",size_hist_bytes);
cudaMalloc((void **) &dev_hist, (size_hist_bytes));
cudaMemset(dev_hist, 0, size_hist_bytes);
unsigned long *hist_array;
int hist_array_size = (nbins+2) * sizeof(unsigned long);
hist_array = (unsigned long*)malloc(hist_array_size);
printf("Size of histogram array: %d bytes\n",hist_array_size);
memset(hist_array,0,hist_array_size);
////////////////////////////////////////////////////////////////////////////
// Define the grid and block size
////////////////////////////////////////////////////////////////////////////
dim3 grid, block;
// (DEFAULT_NBINS+2)*sizeof(int) bytes is the memory needed for one per-block histogram.
// 8192 ints (32768 bytes) is the total budgeted for all per-block histograms,
// so 8192/(DEFAULT_NBINS+2) is the number of blocks in the grid.
grid.x = 8192/(DEFAULT_NBINS+2); // number of blocks
block.x = SUBMATRIX_SIZE/grid.x; // threads per block, so grid.x*block.x == SUBMATRIX_SIZE
// SUBMATRIX_SIZE is the total number of threads launched per kernel call.
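// Worked example with DEFAULT_NBINS = 254 as defined above: grid.x = 8192/256
// = 32 blocks and block.x = 16384/32 = 512 threads per block, so one launch
// covers a full SUBMATRIX_SIZE-wide slice of galaxy pairs.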
////////////////////////////////////////////////////////////////////////////
cudaMalloc((void **) &d_alpha0, size_of_galaxy_array0 );
cudaMalloc((void **) &d_delta0, size_of_galaxy_array0 );
cudaMalloc((void **) &d_alpha1, size_of_galaxy_array1 );
cudaMalloc((void **) &d_delta1, size_of_galaxy_array1 );
// Check to see if we allocated enough memory.
if (0==d_alpha0 || 0==d_delta0 || 0==d_alpha1 || 0==d_delta1 || 0==dev_hist)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
cudaMemset(d_alpha0,0,size_of_galaxy_array0);
cudaMemset(d_delta0,0,size_of_galaxy_array0);
cudaMemset(d_alpha1,0,size_of_galaxy_array1);
cudaMemset(d_delta1,0,size_of_galaxy_array1);
cudaMemcpy(d_alpha0, h_alpha0, size_of_galaxy_array0, cudaMemcpyHostToDevice );
cudaMemcpy(d_delta0, h_delta0, size_of_galaxy_array0, cudaMemcpyHostToDevice );
cudaMemcpy(d_alpha1, h_alpha1, size_of_galaxy_array1, cudaMemcpyHostToDevice );
cudaMemcpy(d_delta1, h_delta1, size_of_galaxy_array1, cudaMemcpyHostToDevice );
int x, y;
int num_submatrices_x = NUM_GALAXIES0 / SUBMATRIX_SIZE;
int num_submatrices_y = NUM_GALAXIES1 / SUBMATRIX_SIZE;
// Take care of edges of matrix.
if (NUM_GALAXIES0%SUBMATRIX_SIZE != 0)
{
num_submatrices_x += 1;
}
if (NUM_GALAXIES1%SUBMATRIX_SIZE != 0)
{
num_submatrices_y += 1;
}
printf("Breaking down the calculations.\n");
printf("Number of submatrices: %dx%d\n",num_submatrices_x,num_submatrices_y);
printf("Number of calculations per submatrices: %dx%d\n",SUBMATRIX_SIZE,SUBMATRIX_SIZE);
int bin_index = 0;
for(int k = 0; k < num_submatrices_y; k++)
{
y = k*SUBMATRIX_SIZE;
//printf("%d %d\n",k,y);
for(int j = 0; j < num_submatrices_x; j++)
{
x = j*SUBMATRIX_SIZE;
//printf("----\n");
//printf("%d %d\t\t%d %d\n",k,y,j,x);
//printf("----\n");
// Set the histogram to all zeros each time.
cudaMemset(dev_hist,0,size_hist_bytes);
int max_x = NUM_GALAXIES0;
int max_y = NUM_GALAXIES1;
distance<<<grid,block>>>(d_alpha0, d_delta0,d_alpha1, d_delta1, x, y, max_x, max_y, dev_hist, hist_lower_range, hist_upper_range, nbins, hist_bin_width, log_binning_flag, two_different_files,conv_factor_angle);
cudaMemcpy(hist, dev_hist, size_hist_bytes, cudaMemcpyDeviceToHost);
////////////////////////////////////////////////////////////////////
// Sum up the histograms from each thread (hist).
////////////////////////////////////////////////////////////////////
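// Layout note: dev_hist holds one (nbins+2)-bin histogram per block, stored
// contiguously (bin 0 = underflow, bin nbins+1 = overflow). Only the first
// grid.x*(nbins+2) entries are ever written; the remainder stays zeroed, so
// the m%(nbins+2) fold below safely collapses every copy into hist_array.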
for(int m=0; m<size_hist; m++)
{
bin_index = m%(nbins+2);
hist_array[bin_index] += hist[m];
}
}
}
unsigned long total = 0;
float lo = hist_lower_range;
float hi = 0;
for(int k=0; k<nbins+1; k++)
{
if (k==0)
{
//fprintf(outfile, "Underflow below %.3e %s %lu \n", lo, ",", hist_array[k]);
}
else
{
if (log_binning_flag==0)
{
hi = lo + hist_bin_width;
}
else if (log_binning_flag==1)
{
//printf("lo: %f\t\tlog(lo): %f\n",lo,log(lo));
hi = exp(log(lo) + hist_bin_width);
}
else if (log_binning_flag==2)
{
//printf("lo: %f\t\tlog10(lo): %f\n",lo,log10(lo));
hi = pow(10,(log10(lo) + hist_bin_width));
}
fprintf(outfile, "%.3e %.3e %lu \n",lo,hi,hist_array[k]);
total += hist_array[k];
lo = hi;
}
}
printf("total: %lu \n", total);
fclose(infile0);
fclose(infile1);
fclose(outfile);
free(h_alpha0);
free(h_delta0);
free(h_alpha1);
free(h_delta1);
free(hist);
cudaFree(d_alpha0);
cudaFree(d_delta0);
cudaFree(d_alpha1);
cudaFree(d_delta1);
cudaFree(dev_hist);
return 0;
}
//////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////
// Module that runs the calculations for input files in the format x:y:z in Mpc
////////////////////////////////////////////////////////////////////////
int doCalcMpc(FILE *infile0, FILE *infile1, FILE *outfile, bool silent_on_GPU_testing, float scale_factor, int nbins, float hist_lower_range, float hist_upper_range, float hist_bin_width, int log_binning_flag, bool two_different_files, float conv_factor_angle){
float *d_x0, *d_y0, *d_z0;
float *h_x0, *h_y0, *h_z0;
float *d_x1, *d_y1, *d_z1;
float *h_x1, *h_y1, *h_z1;
int NUM_GALAXIES0;
int NUM_GALAXIES1;
//////////////////////////////////////////////////////////////////////
// Read in the galaxy files.
////////////////////////////////////////////////////////////////////////////
// Read in the first file
////////////////////////////////////////////////////////////////////////////
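// Expected input layout (implied by the fscanf calls below): the first line
// holds the galaxy count, followed by one "x y z" triplet per line.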
fscanf(infile0, "%d", &NUM_GALAXIES0);
int size_of_galaxy_array0 = NUM_GALAXIES0 * sizeof(float);
printf("SIZE 0 # GALAXIES: %d\n",NUM_GALAXIES0);
h_x0 = (float*)malloc(size_of_galaxy_array0);
h_y0 = (float*)malloc(size_of_galaxy_array0);
h_z0 = (float*)malloc(size_of_galaxy_array0);
float temp0, temp1, temp2;
for(int i=0; i<NUM_GALAXIES0; i++)
{
fscanf(infile0, "%f %f %f", &temp0, &temp1, &temp2);
h_x0[i] = temp0/scale_factor;
h_y0[i] = temp1/scale_factor;
h_z0[i] = temp2/scale_factor;
//if (i<10)
//printf("%e %e\n", h_x0[i], h_y0[i], h_y0[i],);
}
////////////////////////////////////////////////////////////////////////////
// Read in the second file
////////////////////////////////////////////////////////////////////////////
fscanf(infile1, "%d", &NUM_GALAXIES1);
int size_of_galaxy_array1 = NUM_GALAXIES1 * sizeof(float);
printf("SIZE 1 # GALAXIES: %d\n",NUM_GALAXIES1);
h_x1 = (float*)malloc(size_of_galaxy_array1);
h_y1 = (float*)malloc(size_of_galaxy_array1);
h_z1 = (float*)malloc(size_of_galaxy_array1);
for(int i=0; i<NUM_GALAXIES1; i++)
{
fscanf(infile1, "%f %f %f", &temp0, &temp1, &temp2);
h_x1[i] = temp0/scale_factor;
h_y1[i] = temp1/scale_factor;
h_z1[i] = temp2/scale_factor;
//if (i<10)
//printf("%e %e\n", h_x1[i], h_y1[i], h_z1[i]);
}
// get device diagnostics
if (!silent_on_GPU_testing) getDeviceDiagnostics(NUM_GALAXIES0+NUM_GALAXIES1, 3); // three coordinate arrays (x,y,z) per galaxy
////////////////////////////////////////////////////////////////////////////
// Allocation of histogram
///////////////////////////////////////////////////////////////////////////
int *hist, *dev_hist;
int size_hist = SUBMATRIX_SIZE * (nbins+2);
int size_hist_bytes = size_hist*sizeof(int);
hist = (int*)malloc(size_hist_bytes);
memset(hist, 0, size_hist_bytes);
printf("Size of histogram: %d bytes\n",size_hist_bytes);
cudaMalloc((void **) &dev_hist, (size_hist_bytes));
cudaMemset(dev_hist, 0, size_hist_bytes);
unsigned long *hist_array;
int hist_array_size = (nbins+2) * sizeof(unsigned long);
hist_array = (unsigned long*)malloc(hist_array_size);
printf("Size of histogram array: %d bytes\n",hist_array_size);
memset(hist_array,0,hist_array_size);
////////////////////////////////////////////////////////////////////////////
// Define the grid and block size
////////////////////////////////////////////////////////////////////////////
dim3 grid, block;
// (DEFAULT_NBINS+2)*sizeof(int) bytes is the memory needed for one per-block histogram.
// 8192 ints (32768 bytes) is the total budgeted for all per-block histograms,
// so 8192/(DEFAULT_NBINS+2) is the number of blocks in the grid.
grid.x = 8192/(DEFAULT_NBINS+2); // number of blocks
block.x = SUBMATRIX_SIZE/grid.x; // threads per block, so grid.x*block.x == SUBMATRIX_SIZE
// SUBMATRIX_SIZE is the total number of threads launched per kernel call.
////////////////////////////////////////////////////////////////////////////
cudaMalloc((void **) &d_x0, size_of_galaxy_array0 );
cudaMalloc((void **) &d_y0, size_of_galaxy_array0 );
cudaMalloc((void **) &d_z0, size_of_galaxy_array0 );
cudaMalloc((void **) &d_x1, size_of_galaxy_array1 );
cudaMalloc((void **) &d_y1, size_of_galaxy_array1 );
cudaMalloc((void **) &d_z1, size_of_galaxy_array1 );
// Check to see if we allocated enough memory.
if (0==d_x0 || 0==d_y0 || 0==d_z0 || 0==d_x1 || 0==d_y1 || 0==d_z1 || 0==dev_hist)
{
printf("couldn't allocate memory\n");
return 1;
}
// Initialize array to all 0's
cudaMemset(d_x0,0,size_of_galaxy_array0);
cudaMemset(d_y0,0,size_of_galaxy_array0);
cudaMemset(d_z0,0,size_of_galaxy_array0);
cudaMemset(d_x1,0,size_of_galaxy_array1);
cudaMemset(d_y1,0,size_of_galaxy_array1);
cudaMemset(d_z1,0,size_of_galaxy_array1);
cudaMemcpy(d_x0, h_x0, size_of_galaxy_array0, cudaMemcpyHostToDevice );
cudaMemcpy(d_y0, h_y0, size_of_galaxy_array0, cudaMemcpyHostToDevice );
cudaMemcpy(d_z0, h_z0, size_of_galaxy_array0, cudaMemcpyHostToDevice );
cudaMemcpy(d_x1, h_x1, size_of_galaxy_array1, cudaMemcpyHostToDevice );
cudaMemcpy(d_y1, h_y1, size_of_galaxy_array1, cudaMemcpyHostToDevice );
cudaMemcpy(d_z1, h_z1, size_of_galaxy_array1, cudaMemcpyHostToDevice );
int x, y;
int num_submatrices_x = NUM_GALAXIES0 / SUBMATRIX_SIZE;
int num_submatrices_y = NUM_GALAXIES1 / SUBMATRIX_SIZE;
// Take care of edges of matrix.
if (NUM_GALAXIES0%SUBMATRIX_SIZE != 0)
{
num_submatrices_x += 1;
}
if (NUM_GALAXIES1%SUBMATRIX_SIZE != 0)
{
num_submatrices_y += 1;
}
printf("Breaking down the calculations.\n");
printf("Number of submatrices: %dx%d\n",num_submatrices_x,num_submatrices_y);
printf("Number of calculations per submatrices: %dx%d\n",SUBMATRIX_SIZE,SUBMATRIX_SIZE);
int bin_index = 0;
for(int k = 0; k < num_submatrices_y; k++)
{
y = k*SUBMATRIX_SIZE;
//printf("%d %d\n",k,y);
for(int j = 0; j < num_submatrices_x; j++)
{
x = j*SUBMATRIX_SIZE;
//printf("----\n");
//printf("%d %d\t\t%d %d\n",k,y,j,x);
//printf("----\n");
// Set the histogram to all zeros each time.
cudaMemset(dev_hist,0,size_hist_bytes);
int max_x = NUM_GALAXIES0;
int max_y = NUM_GALAXIES1;
distanceMpc<<<grid,block>>>(d_x0, d_y0, d_z0,d_x1, d_y1, d_z1, x, y, max_x, max_y, dev_hist, hist_lower_range, hist_upper_range, nbins, hist_bin_width, log_binning_flag, two_different_files,conv_factor_angle);
cudaMemcpy(hist, dev_hist, size_hist_bytes, cudaMemcpyDeviceToHost);
////////////////////////////////////////////////////////////////////
// Sum up the histograms from each thread (hist).
////////////////////////////////////////////////////////////////////
for(int m=0; m<size_hist; m++)
{
bin_index = m%(nbins+2);
hist_array[bin_index] += hist[m];
}
}
}
unsigned long total = 0;
float lo = hist_lower_range;
float hi = 0;
for(int k=0; k<nbins+1; k++)
{
if (k==0)
{
//fprintf(outfile, "Underflow below %.3e %s %lu \n", lo, ",", hist_array[k]);
}
else
{
if (log_binning_flag==0)
{
hi = lo + hist_bin_width;
}
else if (log_binning_flag==1)
{
//printf("lo: %f\t\tlog(lo): %f\n",lo,log(lo));
hi = exp(log(lo) + hist_bin_width);
}
else if (log_binning_flag==2)
{
//printf("lo: %f\t\tlog10(lo): %f\n",lo,log10(lo));
hi = pow(10,(log10(lo) + hist_bin_width));
}
fprintf(outfile, "%.3e %.3e %lu \n",lo,hi,hist_array[k]);
total += hist_array[k];
lo = hi;
}
}
printf("total: %lu \n", total);
fclose(infile0);
fclose(infile1);
fclose(outfile);
free(h_x0);
free(h_y0);
free(h_z0);
free(h_x1);
free(h_y1);
free(h_z1);
free(hist);
cudaFree(d_x0);
cudaFree(d_y0);
cudaFree(d_z0);
cudaFree(d_x1);
cudaFree(d_y1);
cudaFree(d_z1);
cudaFree(dev_hist);
return 0;
}
//////////////////////////////////////////////////////////////////////
void getDeviceDiagnostics(int tot_gals, int n_coords){
////////////////////////////////////////////////////////////////////////////
// Now get the info from the device.
////////////////////////////////////////////////////////////////////////////
printf("\n------ CUDA device diagnostics ------\n\n");
int nx = SUBMATRIX_SIZE;
int ncalc = nx * nx;
int gpu_mem_needed = int(tot_gals * sizeof(float)) * n_coords; // one float per coordinate per galaxy (ra/dec or x/y/z).
printf("Requirements: %d calculations and %d bytes memory on the GPU \n\n", ncalc, gpu_mem_needed);
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf( "cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id) );
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0)
printf("There is no device supporting CUDA\n");
else
printf("Found %d CUDA Capable device(s)\n", deviceCount);
int dev=0;
for (dev = 0; dev < deviceCount; ++dev) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n",
(float)deviceProp.totalGlobalMem/1048576.0f, (unsigned long long) deviceProp.totalGlobalMem);
printf(" Warp size: %d\n", deviceProp.warpSize);
printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
// does this device have enough capacity for the calculation?
printf("\n*************\n");
// check memory
if((unsigned long long) deviceProp.totalGlobalMem < gpu_mem_needed) printf(" FAILURE: Not enough memory on device for this calculation! \n");
else
{
printf("Hurrah! This device has enough memory to perform this calculation\n");
// check # threads
int threadsPerBlock = deviceProp.maxThreadsPerBlock; // maximal efficiency exists if we use max # threads per block.
int blocksPerGrid = int(ceil(ncalc / float(threadsPerBlock))); // need nx*nx threads total; float cast avoids truncating integer division
if(deviceProp.maxGridSize[0] < blocksPerGrid) printf("FAILURE: Not enough blocks available on the device to do this calculation!\n");
else
{
printf("Hurrah! This device supports enough threads to do this calculation\n");
// how many kernels can we run at once on this machine?
int n_mem = floor(deviceProp.totalGlobalMem / float(gpu_mem_needed));
int n_threads = floor(threadsPerBlock * deviceProp.maxThreadsDim[0]*deviceProp.maxThreadsDim[1] / float(ncalc) ); // max # threads possible?
printf("%d %d \n", n_threads, deviceProp.maxThreadsDim[0]);
int max_kernels = 0;
n_mem<n_threads ? max_kernels = n_mem : max_kernels = n_threads;
printf(" you can run %d kernels at a time on this device without overloading the resources \n", max_kernels);
}
}
}
printf("\n------ End CUDA device diagnostics ------\n\n");
}
////////////////////////////////////////////////////////////////////////////
|
8f3a9923e94f82363ad8029fed3a3c5894906490.hip | // !!! This is a file automatically generated by hipify!!!
// headers
#include <stdio.h>
#include <hip/hip_runtime.h> // for CUDA
// global variables
int inputLength=5;
float *hostInput1=NULL;
float *hostInput2=NULL;
float *hostOutput=NULL;
float *deviceInput1=NULL;
float *deviceInput2=NULL;
float *deviceOutput=NULL;
// global kernel function definition
__global__ void vecAdd(float *in1,float *in2,float *out,int len)
{
// variable declarations
int i=blockIdx.x * blockDim.x + threadIdx.x;
// code
if(i < len)
{
out[i]=in1[i]+in2[i];
}
}
int main(int argc,char *argv[])
{
// function declarations
void cleanup(void);
// code
// allocate host-memory
hostInput1=(float *)malloc(inputLength * sizeof(float));
if(hostInput1== NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 1.\nExitting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
hostInput2=(float *)malloc(inputLength * sizeof(float));
if(hostInput2== NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 2.\nExitting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
hostOutput=(float *)malloc(inputLength * sizeof(float));
if(hostOutput== NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Output Array.\nExitting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
// fill above input host vectors with arbitrary but hard-coded data
hostInput1[0]=101.0;
hostInput1[1]=102.0;
hostInput1[2]=103.0;
hostInput1[3]=104.0;
hostInput1[4]=105.0;
hostInput2[0]=201.0;
hostInput2[1]=202.0;
hostInput2[2]=203.0;
hostInput2[3]=204.0;
hostInput2[4]=205.0;
// allocate device-memory
int size=inputLength * sizeof(float);
hipError_t err=hipSuccess;
err=hipMalloc((void **)&deviceInput1,size);
if(err!=hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",hipGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err=hipMalloc((void **)&deviceInput2,size);
if(err!=hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",hipGetErrorString(err),__FILE__,__LINE__);
hipFree(deviceInput1);
cleanup();
exit(EXIT_FAILURE);
}
err=hipMalloc((void **)&deviceOutput,size);
if(err!=hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",hipGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
// copy host memory contents to device memory
err=hipMemcpy(deviceInput1,hostInput1,size,hipMemcpyHostToDevice);
if(err!=hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",hipGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err=hipMemcpy(deviceInput2,hostInput2,size,hipMemcpyHostToDevice);
if(err!=hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",hipGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
// cuda kernel configuration
dim3 DimGrid=dim3(ceil(inputLength/256.0),1,1);
dim3 DimBlock=dim3(256,1,1);
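// With inputLength = 5, DimGrid is (1,1,1) and DimBlock is (256,1,1): a single
// block of 256 threads, of which only threads 0..4 pass the (i < len) guard in
// vecAdd() -- the ceil() guarantees coverage for any inputLength.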
hipLaunchKernelGGL(( vecAdd), dim3(DimGrid),dim3(DimBlock), 0, 0, deviceInput1,deviceInput2,deviceOutput,inputLength);
// copy device memory to host memory
err=hipMemcpy(hostOutput,deviceOutput,size,hipMemcpyDeviceToHost);
if(err!=hipSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",hipGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
// results
int i;
for(i=0;i<inputLength;i++)
{
printf("%f + %f = %f\n",hostInput1[i],hostInput2[i],hostOutput[i]);
}
// total cleanup
cleanup();
return(0);
}
void cleanup(void)
{
// code
// free allocated device-memory
if(deviceInput1)
{
hipFree(deviceInput1);
deviceInput1=NULL;
}
if(deviceInput2)
{
hipFree(deviceInput2);
deviceInput2=NULL;
}
if(deviceOutput)
{
hipFree(deviceOutput);
deviceOutput=NULL;
}
// free allocated host-memory
if(hostInput1)
{
free(hostInput1);
hostInput1=NULL;
}
if(hostInput2)
{
free(hostInput2);
hostInput2=NULL;
}
if(hostOutput)
{
free(hostOutput);
hostOutput=NULL;
}
}
| 8f3a9923e94f82363ad8029fed3a3c5894906490.cu | // headers
#include <stdio.h>
#include <cuda.h> // for CUDA
// global variables
int inputLength=5;
float *hostInput1=NULL;
float *hostInput2=NULL;
float *hostOutput=NULL;
float *deviceInput1=NULL;
float *deviceInput2=NULL;
float *deviceOutput=NULL;
// global kernel function definition
__global__ void vecAdd(float *in1,float *in2,float *out,int len)
{
// variable declarations
int i=blockIdx.x * blockDim.x + threadIdx.x;
// code
if(i < len)
{
out[i]=in1[i]+in2[i];
}
}
int main(int argc,char *argv[])
{
// function declarations
void cleanup(void);
// code
// allocate host-memory
hostInput1=(float *)malloc(inputLength * sizeof(float));
if(hostInput1== NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 1.\nExitting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
hostInput2=(float *)malloc(inputLength * sizeof(float));
if(hostInput2== NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 2.\nExitting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
hostOutput=(float *)malloc(inputLength * sizeof(float));
if(hostOutput== NULL)
{
printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Output Array.\nExitting ...\n");
cleanup();
exit(EXIT_FAILURE);
}
// fill above input host vectors with arbitrary but hard-coded data
hostInput1[0]=101.0;
hostInput1[1]=102.0;
hostInput1[2]=103.0;
hostInput1[3]=104.0;
hostInput1[4]=105.0;
hostInput2[0]=201.0;
hostInput2[1]=202.0;
hostInput2[2]=203.0;
hostInput2[3]=204.0;
hostInput2[4]=205.0;
// allocate device-memory
int size=inputLength * sizeof(float);
cudaError_t err=cudaSuccess;
err=cudaMalloc((void **)&deviceInput1,size);
if(err!=cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",cudaGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err=cudaMalloc((void **)&deviceInput2,size);
if(err!=cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",cudaGetErrorString(err),__FILE__,__LINE__);
cudaFree(deviceInput1);
cleanup();
exit(EXIT_FAILURE);
}
err=cudaMalloc((void **)&deviceOutput,size);
if(err!=cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",cudaGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
// copy host memory contents to device memory
err=cudaMemcpy(deviceInput1,hostInput1,size,cudaMemcpyHostToDevice);
if(err!=cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",cudaGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
err=cudaMemcpy(deviceInput2,hostInput2,size,cudaMemcpyHostToDevice);
if(err!=cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",cudaGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
// cuda kernel configuration
dim3 DimGrid=dim3(ceil(inputLength/256.0),1,1);
dim3 DimBlock=dim3(256,1,1);
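// With inputLength = 5, DimGrid is (1,1,1) and DimBlock is (256,1,1): a single
// block of 256 threads, of which only threads 0..4 pass the (i < len) guard in
// vecAdd() -- the ceil() guarantees coverage for any inputLength.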
vecAdd<<<DimGrid,DimBlock>>>(deviceInput1,deviceInput2,deviceOutput,inputLength);
// copy device memory to host memory
err=cudaMemcpy(hostOutput,deviceOutput,size,cudaMemcpyDeviceToHost);
if(err!=cudaSuccess)
{
printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",cudaGetErrorString(err),__FILE__,__LINE__);
cleanup();
exit(EXIT_FAILURE);
}
// results
int i;
for(i=0;i<inputLength;i++)
{
printf("%f + %f = %f\n",hostInput1[i],hostInput2[i],hostOutput[i]);
}
// total cleanup
cleanup();
return(0);
}
void cleanup(void)
{
// code
// free allocated device-memory
if(deviceInput1)
{
cudaFree(deviceInput1);
deviceInput1=NULL;
}
if(deviceInput2)
{
cudaFree(deviceInput2);
deviceInput2=NULL;
}
if(deviceOutput)
{
cudaFree(deviceOutput);
deviceOutput=NULL;
}
// free allocated host-memory
if(hostInput1)
{
free(hostInput1);
hostInput1=NULL;
}
if(hostInput2)
{
free(hostInput2);
hostInput2=NULL;
}
if(hostOutput)
{
free(hostOutput);
hostOutput=NULL;
}
}
|
2aed49539aa7953c833350cd58e23d3959bc5879.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "star2d2r-512-10-256_kernel.hu"
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
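// Tiling note: each block computes a __side1Len x __side2Len output tile,
// extended by __OlLen = __halo * __side0Len overlap cells per side so that
// __side0Len (= 10) fused time steps of this radius-2 stencil can be computed
// redundantly within one launch, with no inter-block communication.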
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
double __reg_9_3;
double __reg_9_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC9(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
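// Pipeline note (descriptive, not part of the generated code): __reg_t_0..4
// hold a sliding five-row window of time level t; __CALC{t+1} turns it into one
// row of level t+1. The double-buffered __c_sb broadcasts the centre row so
// each thread reads its +/-2 column neighbours via __SBREF, while row
// neighbours stay in registers. __writeValid{t} trims __halo2 columns per fused
// step, and after nine __CALC stages __STORE applies the tenth step directly
// into global memory.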
if (__c1Id == 0)
{
__LOAD(__reg_9_0, 0);
__LOAD(__reg_9_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_9_0, __reg_9_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_9_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_9_0, __reg_9_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_9_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_9_0, __reg_9_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_9_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_9_0, __reg_9_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_9_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_9_0, __reg_9_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_9_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_9_0, __reg_9_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_9_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_9_0, __reg_9_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_9_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_9_0, __reg_9_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_9_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_9_0, __reg_9_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_9_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(4, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(5, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(6, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(9, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(10, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(11, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(12, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(14, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(15, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(16, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 37);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(17, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 38);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 39);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(19, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 40);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
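// Ramp-up for blocks that skip the near-edge stores of the preceding branch:
// planes 0..40 are streamed through the nine cascaded __CALC stages, and only
// plane 20 is written. An output trails the load front by 20 planes: ten
// five-point (radius-2) levels, i.e. nine __CALC stages plus the five-plane __STORE.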
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 37);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 38);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 39);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 40);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
}
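// Rewind the shared-memory pointer to the first half of the double buffer
// before the steady streaming phase.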
__c_sb = __c_sb_double + __blockSize * 0;
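// The last thread block along the blocked dimension stops streaming five
// planes before the tile end and finishes with an exact-remainder drain below.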
if (__c1Id == __side1Num - 1)
{
for (__h = 41; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
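// Loop body unrolled 5x so the cyclic register names (__reg_k_0..__reg_k_4)
// return to the same rotation at the top of every trip.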
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
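// Fewer than five planes remain. The dead `if (0) {}` head lets the generator
// emit a uniform else-if chain keyed on the exact remainder.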
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
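// Remainder 0: the load front already sits at the tile end. Flush stages 2..9,
// substituting the last raw planes (__reg_0_4, __reg_0_0) for values past the edge.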
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_0_4);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
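// Remainder 1: load the single remaining plane, then flush the pipeline as above.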
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_0_0);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
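// Remainder 2: two planes remain to load before the drain.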
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_0_1);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
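// Remainder 3: three planes remain to load before the drain.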
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_0_2);
__STORE(__h + 0, __reg_9_4, __reg_9_0, __reg_9_1, __reg_0_2, __reg_0_3);
}
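      /* Tail case: four rows remain (__h + 4 equals the tile extent); the same
         drain as above with one extra load/rotation step. */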
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h + 0, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_0_3);
__STORE(__h + 1, __reg_9_0, __reg_9_1, __reg_9_2, __reg_0_3, __reg_0_4);
}
}
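  /* No bottom-tail handling is needed on this path: stream the remaining rows
     at full rate with a steady-state loop unrolled over the five
     register-rotation phases. */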
else
{
for (__h = 41; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
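    /* Fewer than five rows left: peel the remaining iterations one at a time,
       re-checking the tile bound before each load. */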
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
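/*
 * kernel0_9: nine time steps of the halo-2 star stencil fused into a single
 * pass. Each block streams a 256 x 476 tile (plus a 2*9 = 18 cell overlap on
 * every side) through a per-thread register pipeline, so the field is read
 * and written once per nine steps instead of once per step.
 *
 * A minimal host-side launch sketch, derived from the constants below; the
 * host-side names (d_A, nsteps) are assumptions, not taken from this file:
 *
 *   // AN5D_TYPE len1 = dimsize - 4, len2 = dimsize - 4;
 *   // dim3 grid(((len1 + 255) / 256) * ((len2 + 475) / 476));
 *   // dim3 block(512);  // __side2LenOl = 476 + 2 * 18
 *   // hipLaunchKernelGGL(kernel0_9, grid, block, 0, 0, d_A, dimsize, nsteps, c0);
 *
 * One call advances the solution by nine steps, ping-ponging between the two
 * dimsize*dimsize halves of d_A selected by (c0 % 2) / ((c0 + 1) % 2).
 */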
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
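  /* Streaming registers: five rotating row slots for the raw input
     (__reg_0_*) and for each of the eight intermediate stages
     (__reg_1_* .. __reg_8_*), 45 doubles per thread in total. */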
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
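  /* Double-buffered shared row: every stencil application publishes the
     centre row along c2 through __c_sb and flips halves, so neighbour reads
     never race with the next write. */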
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
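  /* Validity masks: the span of lanes that may compute shrinks by one halo
     (2 cells) per fused step; only lanes still valid after all nine steps
     (__writeValid9) are allowed to store. */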
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
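  /* __LOAD reads row h of the tile from the (__c0 % 2) half of A's time
     double buffer; __DEST addresses the same cell in the opposite half,
     where the fully updated value is written. */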
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
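  /* __CALCEXPR: one application of the 9-point, halo-2 star. The c1
     neighbours come from the five row registers (__a..__e), the c2
     neighbours from the shared row; the nine weights sum to roughly one.
     The float-suffixed literals are as emitted and promote to double. */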
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
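  /* __DB_SWITCH flips the shared-memory half; __CALCSETUP publishes this
     thread's centre value into the fresh half and synchronises before any
     neighbour reads it. */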
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
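  /* __CALC1..__CALC8: one pipeline stage each, applying the stencil where
     this lane's __writeValidN holds and passing the centre register through
     unchanged otherwise. __STORE performs the ninth and final application
     directly into __DEST. */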
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
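  /* First tile along c1: rows 0 and 1 are the top boundary, so __reg_8_0 and
     __reg_8_1 keep their loaded values and are fed unchanged into each stage
     while the pipeline warms up. */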
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_8_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_8_0, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_8_0, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_8_0, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_8_0, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_8_0, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_8_0, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_8_0, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
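    /* Pipeline primed: from here on, each loaded row yields one stored row. */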
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(5, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(9, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(10, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(12, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(13, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(14, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(15, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(17, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
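  /* Tiles after the first along c1: warm up every stage from ordinary loads;
     the first store is deferred until all nine applications are in flight. */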
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
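// Warm-up done: re-point the shared-memory window at the second of the two
// ping-pong buffers before entering the steady-state loop, so the
// __DB_SWITCH at the end of each iteration stays in phase.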
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
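// Steady state for the last tile along c1: each iteration loads five new
// rows (one full register-rotation period) and stores five results trailing
// 18 rows behind (__halo1 * 9, one halo row per fused time step of this kernel).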
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
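// Tail of the tile: the branches below drain the pipeline for the 0-4 rows
// left over after the five-row loop, substituting the last loaded rows
// (__reg_0_*) for neighbours beyond the domain edge.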
if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h + 1, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
}
}
else
{
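// Interior tiles: same five-row steady loop, but the tail is drained one row
// at a time below, since the neighbouring tile supplies the bottom halo.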
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
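// Drain the remaining rows one at a time, returning as soon as the
// overlapped tile extent __side1LenOl is exhausted.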
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
}
}
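// kernel0_8: the same radius-2 star stencil, fusing 8 time steps per launch
// (__side0Len = 8). Each block sweeps a 256-row tile over an overlapped width
// of 480 + 2*2*8 = 512 columns; __STORE writes back the result of the 8th step.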
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
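// Register pipeline: stage k keeps a rotating window of five rows holding
// the stencil state after k fused time steps.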
double __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4;
double __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4;
double __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4;
double __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4;
double __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4;
double __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4;
double __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4;
double __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
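// Validity predicates: __writeValidN shrinks the usable column range by
// __halo2 cells per fused step, so stage N only computes where all of its
// neighbours are still valid; __storeValid gates the final (8th) step.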
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
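// __LOAD reads row h of the current time plane (ping-pong on __c0 % 2).
// __CALCEXPR applies the 9-point radius-2 star: rows a, b, d, e come from the
// register pipeline and the +/-1, +/-2 column neighbours from shared memory.
// __CALCn runs stage n where __writeValidn holds and passes the centre value
// through unchanged elsewhere; __STORE computes the final step and writes it
// to the other half of A.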
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
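// Warm-up: rows 0..32 are loaded while the eight pipeline stages are
// primed. The first tile in dimension 1 keeps rows 0 and 1 in
// __reg_7_0/__reg_7_1 as top-boundary values that feed every stage's first
// applications unchanged, and already stores output rows 2..16 on the way.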
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
else
{
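// Overlapped (non-first) tile: plain warm-up with no boundary
// special-casing; each stage comes online once enough rows are buffered,
// and the first store is deferred to offset 16.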
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
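// Reset the shared-memory double buffer before streaming the remaining
// rows. The last tile in dimension 1 must also drain the pipeline at the
// bottom boundary, so it takes the branch below; any other tile simply
// stops once the overlapped height (__side1LenOl) is consumed.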
if (__c1Id == __side1Num - 1)
{
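// Steady state: five rows per iteration (the registers rotate modulo 5),
// each step retiring output row __h - 16. The loop bound leaves at most
// four rows for the boundary cases that follow.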
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
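// Bottom-boundary drain: between zero and four rows remain unprocessed at
// this point. Each case below finishes the outstanding loads and then
// flushes the pipeline, substituting the last raw rows (__reg_0_*) for
// inputs that would fall past the grid edge. The empty "if (0)" arm only
// keeps the generated else-if chain uniform.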
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
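// Remainder 0: every row is already in registers; __reg_0_1 and __reg_0_2
// stand in for the rows beyond the bottom boundary while stages 2..8 drain.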
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
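// Remainder 1: one more row to load (__h + 0), then drain with
// __reg_0_2/__reg_0_3 as boundary stand-ins.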
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
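// Remainder 2: two more rows to load before draining.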
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
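// Remainder 3: three more rows to load before draining.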
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
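// Remainder 4: four more rows to load before draining.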
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
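// Non-last tiles: the same five-row steady-state loop, followed by up to
// four guarded single-row steps.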
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
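// Drain: at most five rows of the overlapped extent remain after the unrolled
// loop; process them one at a time and return as soon as __h reaches
// __side1LenOl.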
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
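// kernel0_7: radius-2 star stencil with seven time steps fused per sweep
// (__side0Len = 7). Each thread block owns a 256-row (c1) x 484-column (c2)
// output tile and redundantly computes a halo of 2*7 = 14 rows/columns per
// side. Rows are streamed through a register pipeline along c1, while the c2
// direction is covered by one thread per overlapped column (484 + 28 = 512).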
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
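// Five registers per pipeline stage, one per row of the five-row stencil
// window; __reg_k_* holds values already advanced by k time steps. In the
// first tile, stage-6 registers double as storage for the boundary rows.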
double __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4;
double __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4;
double __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4;
double __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4;
double __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4;
double __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4;
double __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
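// One shared-memory row per block, double buffered: each __CALC publishes its
// centre register so neighbouring threads can read the c2-offset taps, and
// __DB_SWITCH flips halves so a step can write its row without racing readers
// of the previous step's row.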
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
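// Each fused step consumes one halo (__halo2 = 2 columns) per side, so
// __writeValidK shrinks by __halo2 per stage; only columns that stay valid
// through all seven steps reach __STORE.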
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
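// __LOAD reads row h of the current time plane (__c0 % 2); __CALCk applies one
// stencil step where __writeValidk holds and otherwise forwards the centre
// value unchanged; __STORE performs the final (seventh) step and writes row h
// of the opposite time plane (__DEST).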
if (__c1Id == 0)
{
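// First tile along c1: rows 0 and 1 are the physical boundary. They are
// parked in __reg_6_0/__reg_6_1 and reinjected at the head of every stage
// while the pipeline fills from rows 2 through 28.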
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
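// Interior or last tile: the leading rows loaded here are overlap recomputed
// from the neighbouring tile's extent; 29 plain loads (rows 0-28) fill the
// pipeline before the first store at row 14.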
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
__c_sb = __c_sb_double + __blockSize * 1;
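// The pointer reset above rearms the double buffer for the streaming phase.
// The last tile along c1 takes the flush path below; all other tiles stream
// in the else branch.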
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
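// Flush for the last tile: zero to four rows remain below __h. Each case
// reuses the final loaded rows as fixed boundary values and collapses the
// remaining stages, storing one row per step until the pipeline is empty.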
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
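// Interior-tile steady state: one row in, one fully updated row out, offset
// by 2*__side0Len = 14 rows; unrolled five ways so each register-bank
// rotation closes per pass.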
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
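// Drain the up-to-five remaining overlapped rows one at a time; return as
// soon as __h reaches __side1LenOl.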
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
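// kernel0_6: register-pipelined 2D star stencil (radius __halo = 2) that
// fuses __side0Len = 6 time steps per launch. Each fused step adds one
// pipeline stage and 2 rows/lanes of overlap, so the usable tile shrinks to
// __side1Len x __side2Len = 256 x 488 inside a 280 x 512 loaded region.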
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
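  // Two halves of a double-buffered shared-memory row: each __CALCSETUP
  // flips to the other half before publishing this thread's centre value,
  // so stages can read the +/-2 lane neighbours via __SBREF with a single
  // barrier per stage.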
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
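  // Validity masks: __loadValid guards global reads at the tile edges, and
  // __writeValidN lets a thread produce stage-N results only if it sits at
  // least N * __halo2 lanes inside the overlapped tile; the valid region
  // shrinks by one halo per fused step, and only stage-6 results are stored.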
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
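  // Per-row helpers: __LOAD fetches one c1-row from the current time copy of
  // A; __CALCk applies the stencil for pipeline stage k, or forwards the
  // centre value unchanged where that stage is not yet valid; __STORE runs
  // the final stage and writes straight into the other time copy (__DEST).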
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
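    // First c1-tile: rows 0 and 1 form the fixed top boundary. They are
    // parked in __reg_5_0/__reg_5_1 (stage-5 registers, not needed this
    // early) and fed unchanged into the head of each stage while the
    // pipeline fills.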
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
else
{
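    // Interior c1-tiles: no boundary clamping; simply prime the 6-stage
    // pipeline with 25 plain rows before the first store at offset 12.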
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
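  // Pin __c_sb to a fixed half of the double buffer, independent of how
  // many flips the chosen prologue branch performed.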
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
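    // Last c1-tile: steady state advances 5 rows per iteration (one per
    // phase of the mod-5 register rotation); the drain below then retires
    // the pipeline against the bottom boundary.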
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
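    // Drain: depending on how many rows remain (__h + 0 .. __h + 4 cases),
    // rerun the outstanding stages, substituting the last two raw input rows
    // for neighbours that would fall past the bottom boundary.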
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
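    // Interior c1-tiles: sweep the full overlapped height, then the tail
    // finishes up to four leftover rows, returning once __h hits
    // __side1LenOl.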
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
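// kernel0_5: the same scheme with __side0Len = 5 fused time steps, one
// pipeline stage fewer, and a correspondingly wider usable tile
// (__side2Len = 492 inside the same 512-lane load).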
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
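    // Pipeline flush: exactly one of the following cases fires, depending on
    // how many rows (0..4) remain past the unrolled loop; boundary rows are
    // reused as pass-through values for stages that have run out of input.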
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
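  // Interior blocks: intermediate time steps in the halo1 * __side0Len row
  // overlap are recomputed redundantly by both neighbors, so blocks need no
  // inter-block communication; the final __STOREs still tile c1 disjointly.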
else
{
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads();
}
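    // Drain the remaining pipeline one row at a time, returning as soon as
    // __h reaches the overlapped extent __side1LenOl of this block.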
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
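// kernel0_4: like the 5-step variant above, a register-pipelined 2D star
// stencil of radius 2, but fused over 4 time steps per launch
// (__side0Len = 4). Each block advances a 256-row x 496-column tile, padded
// by __halo * __side0Len = 8 cells per side so all 4 steps complete without
// inter-block communication. Presumably launched with __side2LenOl threads
// per block and __side1Num * __side2Num blocks, as implied by the blockIdx.x
// decomposition below.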
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
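  // Per-row macros: __LOAD reads one row of the current time plane into a
  // register; each __CALCn applies one stencil stage (5 row registers plus
  // the +-2 column neighbors staged through the double-buffered shared array
  // __c_sb) where __writeValidn holds, and passes the center value through
  // otherwise; __STORE applies the final stage and commits the row to the
  // opposite time plane of A.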
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
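  // Warm-up: the first block treats rows 0..1 as fixed boundary values
  // (loaded straight into the last pipeline stage), all other blocks prime
  // the pipeline from their halo rows; either way rows 0..16 are consumed
  // before the steady-state loops start at __h = 17.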
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
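  // Select the shared-memory buffer half for the main loops; the parity
  // differs from the 5-step kernel above, presumably matching the even
  // number of __DB_SWITCH toggles this pipeline performs per row (which is
  // also why the loops below carry no explicit __DB_SWITCH).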
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
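    // Flush the remaining pipeline stages against the bottom boundary; one
    // case per possible remainder (0..4 rows).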
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
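  // Interior blocks: the same full-extent sweep as the boundary case,
  // followed by a row-at-a-time drain with early exits at __side1LenOl.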
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
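// kernel0_3: the 3-step variant (__side0Len = 3) of the same scheme; one
// fewer pipeline stage and a correspondingly narrower overlap
// (__halo * 3 = 6 cells per side, 500-column tile).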
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
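    // Flush the two remaining pipeline stages against the bottom boundary;
    // one case per possible remainder (0..4 rows).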
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
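  // Interior blocks of kernel0_3: same sweep-and-drain pattern as in the
  // deeper kernels above.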
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
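// kernel0_2: the two-step variant, used when exactly two time steps remain.
// Same tiling scheme, but with __side0Len = 2 the per-edge overlap shrinks
// to 4 rows/columns and the store lag to __h - 4.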
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
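// Two-stage pipeline: __CALC1 computes the step-1 value in registers (and
// passes the input through outside its valid window), while __STORE applies
// step 2 and writes to the opposite time plane of A, indexed by (c0 + 1) % 2.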
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
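// Pin the shared-memory double buffer to a known half before streaming; the
// warm-up's __DB_SWITCH calls leave its parity dependent on the path taken.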
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
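// The dead "if (0) {}" head lets the generated remainder cases form a
// uniform else-if chain; each arm flushes the 0 to 4 rows left over when
// the tile height is not a multiple of the 5-way unroll.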
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
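// Interior tiles need no tail-row special cases: stream the full overlapped
// height, then the guarded single-row steps after the loop drain whatever
// the 5-way unroll leaves in flight.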
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
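// kernel0_1: the single-step fallback, presumably chosen by the host-side
// driver when only one time step remains after the larger fused sweeps.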
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
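// With one fused step there are no intermediate __CALC stages: each __STORE
// applies the star once, taking row neighbours from the five-register
// window and column neighbours from shared memory, with a two-row lag.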
// For a single fused step the top tile needs no boundary clamping, so top
// and interior tiles prime identically: fill the five-row window and emit
// the first interior row.
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
// 2aed49539aa7953c833350cd58e23d3959bc5879.cu
#include "star2d2r-512-10-256_kernel.hu"
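// star2d2r-512-10-256: by the header name, an AN5D-generated 2D radius-2
// star stencil on a 512^2 grid, fusing up to 10 time steps per launch with
// 256-row tiles. __sbref_wrap below is the trivial shared-memory accessor
// behind the __SBREF macro, so all neighbour reads share one indexing path.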
__device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; }
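// kernel0_10: the steady-state kernel, advancing ten fused time steps per
// launch through ten register stages (__reg_0_* .. __reg_9_*) of five rows
// each; only stage-10 results reach global memory.
//
// Hypothetical launch sketch (an assumption reconstructed from the
// constants below, not code found in this file): with dimsize = 516
// (512 interior points + 2 * 2 halo),
//
//   unsigned side1Num = (512 + 256 - 1) / 256; // 2 row tiles
//   unsigned side2Num = (512 + 472 - 1) / 472; // 2 column tiles
//   kernel0_10<<<side1Num * side2Num, 512>>>(A, 516, timestep, c0);
//
// would advance the whole grid by ten steps; the parity of c0 selects which
// half of A is read and which is written.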
__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 472;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
double __reg_9_0;
double __reg_9_1;
double __reg_9_2;
double __reg_9_3;
double __reg_9_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
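// Validity ladder: every fused stage consumes a 2-column halo, so stage k
// may only write where __local_c2 is at least 2 * k columns from either
// edge of the 512-thread tile; results must survive all ten stages
// (__writeValid10) before __STORE commits them.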
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC9(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid9) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
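// Pipeline plumbing: __CALCk runs stage k inside its valid window and
// otherwise forwards the centre value; __CALCSETUP publishes the centre row
// to shared memory (flipping the double buffer) so column neighbours come
// from __SBREF. Written out, each stage computes
//   out(c1,c2) = 0.25001*u(c1,c2)
//              + 0.09371*u(c1-2,c2) + 0.09374*u(c1-1,c2)
//              + 0.09375*u(c1+1,c2) + 0.09378*u(c1+2,c2)
//              + 0.09376*u(c1,c2-2) + 0.09372*u(c1,c2-1)
//              + 0.09377*u(c1,c2+1) + 0.09373*u(c1,c2+2)
// with rows fed by the rotating registers and columns by shared memory.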
if (__c1Id == 0)
{
__LOAD(__reg_9_0, 0);
__LOAD(__reg_9_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_9_0, __reg_9_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_9_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_9_0, __reg_9_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_9_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_9_0, __reg_9_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_9_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_9_0, __reg_9_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_9_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_9_0, __reg_9_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_9_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_9_0, __reg_9_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_9_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_9_0, __reg_9_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_9_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_9_0, __reg_9_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_9_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_9_0, __reg_9_1, __reg_8_2, __reg_8_3, __reg_8_4);
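// Row 20 brings the ninth and last intermediate stage on line; rows 21 and
// 22 below complete the support for the first fully fused output row.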
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_9_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(2, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
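// First store: c1 = 2, the first interior row of the top tile, now advanced
// by all ten time steps.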
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(4, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(5, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(6, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(7, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(9, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(10, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(11, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(12, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(14, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(15, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(16, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, 37);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(17, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, 38);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, 39);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(19, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__LOAD(__reg_0_0, 40);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
}
else
{
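// Non-top tiles compute their upper overlap rather than clamping it: loads
// stay in the stage-0 registers and the deeper stages come on line
// gradually, instead of reusing boundary rows held in __reg_9_0/__reg_9_1
// as the __c1Id == 0 branch does.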
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 37);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 38);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 39);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 40);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
}
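  // Reset the tile pointer to the first half of what is evidently a
  // double-buffered shared-memory region before the streaming phase.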
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
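    // Last tile along dimension 1: stream the remaining rows through the
    // register pipeline. The loop body is unrolled 5x so the 5-wide
    // register file returns to its starting alignment each iteration; a
    // row loaded at __h is stored at __h - 20, i.e. 2 rows of latency for
    // each fused stencil stage applied by __CALC1..__CALC9 and __STORE.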
for (__h = 41; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
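    // Fewer than 5 rows remain: dispatch on the exact remainder and flush
    // the pipeline. During the flush, raw stage-0 registers (__reg_0_*)
    // stand in for the missing higher-stage values, consistent with rows
    // inside the global halo keeping their input values across time steps.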
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_0_4);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_0_4, __reg_0_0);
}
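    // One input row remains: load it, then flush the pipeline.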
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_0_0);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_0_0, __reg_0_1);
}
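    // Two input rows remain: load both, then flush the pipeline.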
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_0_1);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_0_1, __reg_0_2);
}
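    // Three input rows remain: load them, then flush the pipeline.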
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_0_2);
__STORE(__h + 0, __reg_9_4, __reg_9_0, __reg_9_1, __reg_0_2, __reg_0_3);
}
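    // Four input rows remain: load them, then flush the pipeline.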
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 19, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 18, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 17, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 16, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 15, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 14, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 13, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 12, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 11, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 10, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 9, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 8, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 7, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 6, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 5, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 3, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h - 2, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__STORE(__h + 0, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_0_3);
__STORE(__h + 1, __reg_9_0, __reg_9_1, __reg_9_2, __reg_0_3, __reg_0_4);
}
}
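  // Interior tiles: the whole overlapped range is computed, so the
  // pipeline drains without any boundary substitution.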
else
{
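    // Steady state for interior tiles, unrolled 5x as above.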
for (__h = 41; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
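    // Drain up to 5 trailing rows, exiting once __h reaches the
    // overlapped tile length.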
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC9(__reg_9_3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 20, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC9(__reg_9_4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 20, __reg_9_0, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC9(__reg_9_0, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 20, __reg_9_1, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC9(__reg_9_1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h - 20, __reg_9_2, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC9(__reg_9_2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 20, __reg_9_3, __reg_9_4, __reg_9_0, __reg_9_1, __reg_9_2);
__h++;
}
}
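// kernel0_9: temporal blocking over 9 fused time steps (__side0Len = 9).
// Each thread owns one column of the 2D grid and streams rows through a
// 9-stage register pipeline; every stage applies one instance of the halo-2
// star stencil defined by __CALCEXPR below, and __STORE performs the ninth
// application while writing to the destination time plane. The host-side
// launch is not part of this section; from the index math it is presumably
// a 1D grid of __side1Num * __side2Num blocks with __side2LenOl threads per
// block (a hedged reading, not shown in this file).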
__global__ void kernel0_9(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 476;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
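// 45 streaming registers: stages 0 through 8, 5 registers each, so every
// stage holds a sliding window of 2 * __halo1 + 1 = 5 consecutive rows.
// The final (ninth) stencil application has no registers of its own; it is
// performed inside __STORE.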
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
double __reg_8_0;
double __reg_8_1;
double __reg_8_2;
double __reg_8_3;
double __reg_8_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
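// Two shared-memory slabs (double buffering): one slab is read while the
// next centre row is staged, and __DB_SWITCH flips between them. The
// predicates below deactivate threads whose column lies in a neighbouring
// tile's overlap: each __writeValidN trims __halo2 columns per pipeline
// stage, and only __writeValid9 threads reach the final store.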
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __storeValid = __writeValid9;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
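// Macro kit: __LOAD fetches one row of the current time plane (the time
// parity __c0 % 2 selects the half of A); __CALCEXPR is the halo-2 star
// stencil, taking 4 row neighbours from registers and 4 column neighbours
// from shared memory around the centre; each __CALCn stages the centre row,
// applies the stencil where its stage is still valid, and otherwise passes
// the centre value through unchanged so masked lanes stay consistent.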
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC8(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid8) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
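// First tile along c1: rows 0 and 1 are the physical boundary and stay in
// __reg_8_0/__reg_8_1, seeding the first invocation of every pipeline
// stage. The pipeline fills row by row; the earliest completed result
// lands at row 2, the first interior row.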
if (__c1Id == 0)
{
__LOAD(__reg_8_0, 0);
__LOAD(__reg_8_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_8_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_8_0, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_8_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_8_0, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_8_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_8_0, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_8_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_8_0, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_8_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_8_0, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_8_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_8_0, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_8_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_8_0, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_8_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(2, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(3, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(4, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(5, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(7, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(8, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(9, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(10, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(12, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(13, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(14, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(15, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(17, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
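// Interior tiles: the pipeline is primed from the tile's own overlap rows,
// so no boundary seeding is needed; stores begin only at row 18, the first
// row whose 9-step dependence cone fits inside this tile's halo.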
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 33);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 34);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 35);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 36);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
}
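// Re-point __c_sb at the second slab so the loop below starts on a known
// double-buffer parity (bookkeeping after the prologue's switches).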
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 37; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
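// Steady state for the last tile along c1, unrolled 5x so the rotating
// register names return to their initial assignment each iteration: every
// unrolled step loads one row, advances all 9 stages, and stores the
// finished row 18 rows behind the read cursor __h. The loop bound stops
// early enough to leave the bottom boundary rows for the drain below.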
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
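// Drain for the bottom boundary: depending on how many rows remain before
// the clamp point (the chain of __h + k tests below), the last loaded
// boundary registers are substituted into each still-active stage while
// the rows left in flight are flushed with a fixed cascade of stores.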
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_0_0, __reg_0_1);
}
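// Same drain with one additional row to load before the boundary clamp.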
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_0_1, __reg_0_2);
}
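// Same drain with two additional rows to load before the boundary clamp.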
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_0_2, __reg_0_3);
}
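// Three rows left: one extra __LOAD(__h + 2) before the same drain pattern.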
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_0_3, __reg_0_4);
}
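// Four rows left: loads run through __h + 3 and the drain finishes with
// stores at __h + 0 and __h + 1.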
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 17, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 16, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 15, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 14, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 13, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 12, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 11, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 10, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 9, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 8, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 7, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 6, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 5, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 4, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 3, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h - 2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__STORE(__h + 0, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4);
__STORE(__h + 1, __reg_8_1, __reg_8_2, __reg_8_3, __reg_0_4, __reg_0_0);
}
}
else
{
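// Steady state for non-terminal tiles: unrolled 5x so the five-register
// rotation returns to its starting phase each iteration, with an explicit
// __DB_SWITCH and barrier closing each pass; every store trails the
// corresponding load by 18 rows (the depth of this 9-stage pipeline).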
for (__h = 37; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
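// Fewer than five rows left: drain one row at a time, returning the moment
// the overlapped tile height (__side1LenOl) is exhausted.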
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC8(__reg_8_1, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 18, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC8(__reg_8_2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h - 18, __reg_8_3, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC8(__reg_8_3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 18, __reg_8_4, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC8(__reg_8_4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 18, __reg_8_0, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC8(__reg_8_0, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 18, __reg_8_1, __reg_8_2, __reg_8_3, __reg_8_4, __reg_8_0);
__h++;
}
}
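// kernel0_8: the 8-step temporally blocked variant of the same radius-2 star
// stencil (see the 0.09371f..0.25001f coefficients below). Eight time steps
// are fused per sweep (__side0Len = 8): stages 1-7 live in the __CALCk
// register chains and the eighth application happens inside __STORE, so the
// writable region shrinks by one halo (__halo2 = 2) per stage, as the
// __writeValidN masks encode.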
__global__ void kernel0_8(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 480;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
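// One five-register rotation per pipeline stage: __reg_k_* holds the row
// window after k fused time steps (k = 0 is the raw load).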
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
double __reg_7_0;
double __reg_7_1;
double __reg_7_2;
double __reg_7_3;
double __reg_7_4;
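// Double-buffered shared-memory row: each __CALCSETUP flips the buffer via
// __DB_SWITCH and publishes its center row, so a single __syncthreads per
// stage suffices (the flip avoids a write-after-read hazard between
// consecutive stages).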
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __storeValid = __writeValid8;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
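// Per-kernel helpers (the bodies repeat verbatim across the kernels above):
// __LOAD/__DEST ping-pong between the two time copies of A; __CALCEXPR is
// one stencil application combining five register rows with the +/-1 and
// +/-2 column neighbors from shared memory; __CALCk applies it only where
// the stage-k mask holds and otherwise passes the center value through.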
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC7(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid7) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
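// First c1 tile: rows 0 and 1 are global boundary rows, so they are loaded
// directly into the final-stage registers (__reg_7_0/__reg_7_1) and fed to
// every stage unchanged while the pipeline fills; the first store lands at
// row 2.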
if (__c1Id == 0)
{
__LOAD(__reg_7_0, 0);
__LOAD(__reg_7_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_7_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_7_0, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_7_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_7_0, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_7_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_7_0, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_7_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_7_0, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_7_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_7_0, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_7_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_7_0, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_7_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(2, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(3, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(5, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(6, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(7, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(8, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(10, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(11, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(12, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(13, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(15, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
else
{
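// Non-first c1 tiles: plain warm-up. Load 33 halo-overlapped rows and run
// every stage to steady state; the single store at offset 16 primes the
// streaming phase.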
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 29);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 30);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 31);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 32);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
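// Last c1 tile: keep streaming in steps of five while at least five rows
// remain before the trailing halo; stores trail the load cursor by 16 rows
// (this kernel's 8-stage pipeline depth).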
for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
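/* Pipeline drain: pick the epilogue matching how many rows remain (0..4)
   past the loop and flush the in-flight stages, reusing the last loaded
   rows (__reg_0_*) as the clamped bottom boundary at every stage. */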
if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 15, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 14, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 13, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 12, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 11, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 10, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 9, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 8, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 7, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 6, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 5, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 4, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 3, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h - 2, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__STORE(__h + 0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0);
__STORE(__h + 1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_0_0, __reg_0_1);
}
}
else
{
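/* Blocks that are not last along c1: the same unrolled steady-state sweep
   over the full overlapped extent; no boundary epilogue is needed because
   a neighbouring block owns the remaining rows. */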
for (__h = 33; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
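/* Tail: up to four leftover rows, handled one at a time, returning as soon
   as the overlapped extent __side1LenOl is exhausted. */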
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC7(__reg_7_4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 16, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC7(__reg_7_0, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 16, __reg_7_1, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC7(__reg_7_1, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 16, __reg_7_2, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC7(__reg_7_2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 16, __reg_7_3, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC7(__reg_7_3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h - 16, __reg_7_4, __reg_7_0, __reg_7_1, __reg_7_2, __reg_7_3);
__h++;
}
}
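/* kernel0_7: variant that fuses 7 timesteps per sweep (__side0Len = 7).
   Same register-pipelined structure as the deeper variants above, with one
   pipeline stage fewer (__CALC1..__CALC6 plus the final __STORE). */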
__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
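/* Tile geometry: a 256-row c1 tile by a 484-lane c2 tile, each extended by
   __halo * __side0Len = 14 cells on both sides so the 7 fused timesteps can
   be computed redundantly in the overlap region. */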
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 484;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
double __reg_6_0;
double __reg_6_1;
double __reg_6_2;
double __reg_6_3;
double __reg_6_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
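/* Pipeline macros: __LOAD reads one c1 row from the current time copy of A
   (ping-pong on c0 parity); __CALCEXPR evaluates the radius-2 star stencil,
   taking c1 neighbours from registers and c2 neighbours from the shared-memory
   row (the single-precision coefficient literals are promoted to double);
   __CALCSETUP flips the shared buffer, publishes the centre row, and syncs;
   __CALCn applies fused timestep n when the lane lies inside its shrinking
   halo and passes the centre value through otherwise; __STORE applies the
   final timestep and writes to the other time copy of A. */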
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC6(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid6) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
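/* Prologue for the first c1 block: rows 0 and 1 are parked in __reg_6_0 and
   __reg_6_1 and re-fed to every stage as the clamped top boundary while the
   pipeline fills; outputs for rows 2..14 are emitted along the way. */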
if (__c1Id == 0)
{
__LOAD(__reg_6_0, 0);
__LOAD(__reg_6_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_6_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_6_0, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_6_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_6_0, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_6_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_6_0, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_6_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_6_0, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_6_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_6_0, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_6_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(3, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(4, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(5, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(6, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(8, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(9, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(10, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(11, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(13, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
else
{
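/* Interior and last blocks prime the pipeline from the overlapped halo
   without boundary clamping; only the first owned output (row 14) is stored
   once the pipeline is full. */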
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 25);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 26);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 27);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 28);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
}
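// End of the warm-up for this kernel (presumably the 7-step variant,
// __side0Len = 7: six __CALC stages plus the final __STORE): rows 0..28
// have been loaded and pushed through the pipeline, ending with
// __STORE(14, ...). The next line re-points __c_sb at the shared-memory
// half the prologue's __DB_SWITCH flips left active.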
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
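// Drain for the bottom-most tile: the "__h + k == __c1Len - ..." tests
// below dispatch on how many rows remain (0..4) and flush the partially
// filled pipeline, reusing the last two loaded rows (__reg_0_2/__reg_0_3
// in the first branch) as the clamped bottom boundary at every stage.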
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 13, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 12, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 11, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 10, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 9, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 8, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 7, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 6, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 5, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 4, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h - 2, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__STORE(__h + 0, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1);
__STORE(__h + 1, __reg_6_3, __reg_6_4, __reg_6_0, __reg_0_1, __reg_0_2);
}
}
else
{
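// Interior tiles: the same 5-way unrolled steady-state loop, but with no
// boundary drain -- rows past __side1LenOl belong to the next tile's
// overlap region.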
for (__h = 29; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
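// Remainder of the 5-way rotation: up to four more rows, each step
// guarded by an early return once __h reaches __side1LenOl.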
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC6(__reg_6_2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 14, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC6(__reg_6_3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 14, __reg_6_4, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC6(__reg_6_4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h - 14, __reg_6_0, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC6(__reg_6_0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 14, __reg_6_1, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC6(__reg_6_1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 14, __reg_6_2, __reg_6_3, __reg_6_4, __reg_6_0, __reg_6_1);
__h++;
}
}
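// kernel0_6: the 6-step variant of the same register-pipelined, temporally
// blocked sweep (__side0Len = 6: five __CALC stages feed the final __STORE).
// __CALCEXPR below is a 2-D order-2 star stencil -- five register values
// along c1 plus the -2/-1/+1/+2 shared-memory neighbours along c2, nine
// points in total. Note the coefficients are single-precision literals
// applied to double data, as generated.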
__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 488;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
double __reg_5_0;
double __reg_5_1;
double __reg_5_2;
double __reg_5_3;
double __reg_5_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC5(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid5) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
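// Prologue: fill the pipeline. The first tile (__c1Id == 0) clamps the top
// boundary by feeding rows 0 and 1 (held in __reg_5_0/__reg_5_1) unchanged
// into every stage and issues its first __STORE at row 2; interior tiles
// instead load 25 halo/overlap rows and first __STORE at row 12
// (= __side0Len * __halo1).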
if (__c1Id == 0)
{
__LOAD(__reg_5_0, 0);
__LOAD(__reg_5_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_5_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_5_0, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_5_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_5_0, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_5_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_5_0, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_5_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_5_0, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_5_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(2, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(3, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(4, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(6, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(7, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(8, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(9, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(11, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
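// Interior and bottom tiles: no top clamping -- rows 0..24 come from the
// overlap with the tile above (__OlLen1 = 12 on each side).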
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 21);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 22);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 23);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 24);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
}
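// Re-point __c_sb at shared-memory half 0 (the half the prologue's
// __DB_SWITCH flips leave active), then enter the steady state; the
// bottom-most tile additionally drains the pipeline below.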
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
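// Bottom-tile drain, as in the 7-step kernel above: branch on the number
// of rows left (0..4) and flush the remaining stages, substituting the
// last two loaded rows (__reg_0_3/__reg_0_4 in the first branch) for the
// out-of-range bottom neighbours.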
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_0_3, __reg_0_4);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__LOAD(__reg_0_1, __h + 1);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 11, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__LOAD(__reg_0_2, __h + 2);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 10, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__LOAD(__reg_0_3, __h + 3);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 9, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 8, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 7, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 6, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 5, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 4, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 3, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h - 2, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
__STORE(__h - 1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__STORE(__h + 0, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2);
__STORE(__h + 1, __reg_5_4, __reg_5_0, __reg_5_1, __reg_0_2, __reg_0_3);
}
}
else
{
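// Interior tiles: stream the full overlapped extent, five rows per pass,
// each __STORE retiring a row twelve rows behind the load. The unrolled
// tail after the loop retires one row at a time and returns once __h
// reaches __side1LenOl.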
for (__h = 25; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC5(__reg_5_0, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h - 12, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC5(__reg_5_1, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 12, __reg_5_2, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC5(__reg_5_2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 12, __reg_5_3, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC5(__reg_5_3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 12, __reg_5_4, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC5(__reg_5_4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 12, __reg_5_0, __reg_5_1, __reg_5_2, __reg_5_3, __reg_5_4);
__h++;
}
}
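// kernel0_5: the same 9-point star stencil with 5 time steps fused per
// launch (__side0Len = 5). Four register stages (__CALC1..__CALC4) feed the
// final __CALCEXPR inside __STORE, so stores trail loads by 2 * 5 = 10 rows,
// and __side2Len = 492 keeps the block at 492 + 2 * 10 = 512 threads.
// Intermediate time steps live in registers/shared memory; only the final
// step is written to the other time plane of A.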
__global__ void kernel0_5(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
double __reg_4_0;
double __reg_4_1;
double __reg_4_2;
double __reg_4_3;
double __reg_4_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __storeValid = __writeValid5;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC4(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid4) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
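// Warm-up: the first c1 tile clamps at the lower boundary by seeding the
// deepest-stage registers (__reg_4_0/__reg_4_1) with rows 0 and 1, so every
// stage reuses them in place of out-of-range neighbors; other tiles fill the
// pipeline from halo rows and emit their first __STORE only at row 10.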
if (__c1Id == 0)
{
__LOAD(__reg_4_0, 0);
__LOAD(__reg_4_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_4_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_4_0, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_4_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_4_0, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_4_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_4_0, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_4_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(2, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(4, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(5, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(6, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(7, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(9, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 17);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 18);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 19);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 20);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
}
__c_sb = __c_sb_double + __blockSize * 1;
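// Pin the double buffer to its second half for the steady-state loop; both
// warm-up branches above end after an odd number of __DB_SWITCH flips, so
// this assignment only makes the current half explicit.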
if (__c1Id == __side1Num - 1)
{
for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads(); // 5 flips per row, 25 per pass (odd): one extra flip restores the half expected at loop entry
}
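// Same drain pattern as the 6-step kernel above, one stage shallower: the
// remaining zero to four rows are fed through the outstanding stages and the
// in-flight rows flushed with trailing __STOREs.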
if (0) {} // dead arm: keeps the generated else-if dispatch uniform
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_0_4, __reg_0_0);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_0_0, __reg_0_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_0_1, __reg_0_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_0_2, __reg_0_3);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_1, __h + 0);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__LOAD(__reg_0_2, __h + 1);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 9, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__LOAD(__reg_0_3, __h + 2);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 8, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__LOAD(__reg_0_4, __h + 3);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 7, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 6, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 5, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 3, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h - 2, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
__STORE(__h - 1, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__STORE(__h + 0, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3);
__STORE(__h + 1, __reg_4_0, __reg_4_1, __reg_4_2, __reg_0_3, __reg_0_4);
}
}
else
{
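// Interior tiles of the 5-step kernel: identical streaming structure, with
// stores trailing loads by 10 rows and an explicit buffer flip per pass
// (see the parity note above).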
for (__h = 21; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
__DB_SWITCH(); __syncthreads(); // restore double-buffer parity (5 flips per row, odd count per pass)
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC4(__reg_4_3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 10, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC4(__reg_4_4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 10, __reg_4_0, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC4(__reg_4_0, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 10, __reg_4_1, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC4(__reg_4_1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h - 10, __reg_4_2, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC4(__reg_4_2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 10, __reg_4_3, __reg_4_4, __reg_4_0, __reg_4_1, __reg_4_2);
__h++;
}
}
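// kernel0_4: 4 fused time steps (__side0Len = 4). Three register stages plus
// the store stage give a load-to-store distance of 2 * 4 = 8 rows, and
// __side2Len = 496 again yields a 496 + 2 * 8 = 512-thread block.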
__global__ void kernel0_4(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 496;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
double __reg_3_0;
double __reg_3_1;
double __reg_3_2;
double __reg_3_3;
double __reg_3_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC3(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid3) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
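// Warm-up mirrors kernel0_5: the first c1 tile seeds __reg_3_0/__reg_3_1
// with boundary rows 0 and 1 and starts storing at row 2; other tiles fill
// the pipeline from halo rows and emit their first __STORE at row 8.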
if (__c1Id == 0)
{
__LOAD(__reg_3_0, 0);
__LOAD(__reg_3_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_3_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_3_0, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_3_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_3_0, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_3_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(2, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(3, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(4, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(5, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(7, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__LOAD(__reg_0_3, 13);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 14);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 15);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 16);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_0_0, __reg_0_1);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_0_1, __reg_0_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_0_2, __reg_0_3);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_0_3, __reg_0_4);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_2, __h + 0);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__LOAD(__reg_0_3, __h + 1);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 7, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__LOAD(__reg_0_4, __h + 2);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 6, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__LOAD(__reg_0_0, __h + 3);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 5, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 4, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 3, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h - 2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
__STORE(__h - 1, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__STORE(__h + 0, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4);
__STORE(__h + 1, __reg_3_1, __reg_3_2, __reg_3_3, __reg_0_4, __reg_0_0);
}
}
else
{
for (__h = 17; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__CALC3(__reg_3_1, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 8, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__CALC3(__reg_3_2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h - 8, __reg_3_3, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__CALC3(__reg_3_3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 8, __reg_3_4, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__CALC3(__reg_3_4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 8, __reg_3_0, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__CALC3(__reg_3_0, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 8, __reg_3_1, __reg_3_2, __reg_3_3, __reg_3_4, __reg_3_0);
__h++;
}
}
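/* kernel0_3: same AN5D-style scheme with three fused time steps per sweep
   (__side0Len == 3): two __CALCn stages feed the final application inside
   __STORE, and each thread streams one c2 column through five-row register
   windows. */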
__global__ void kernel0_3(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 500;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
double __reg_2_0;
double __reg_2_1;
double __reg_2_2;
double __reg_2_3;
double __reg_2_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __CALC2(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid2) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_2_0, 0);
__LOAD(__reg_2_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_2_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_2_0, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_2_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(2, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(3, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(5, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__LOAD(__reg_0_4, 9);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, 10);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, 11);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 12);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_0_1, __reg_0_2);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_0_2, __reg_0_3);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_0_3, __reg_0_4);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_0_4, __reg_0_0);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_3, __h + 0);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__LOAD(__reg_0_4, __h + 1);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 5, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__LOAD(__reg_0_0, __h + 2);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 4, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__LOAD(__reg_0_1, __h + 3);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 3, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h - 2, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
__STORE(__h - 1, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__STORE(__h + 0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0);
__STORE(__h + 1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_0_0, __reg_0_1);
}
}
else
{
for (__h = 13; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__CALC2(__reg_2_4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__STORE(__h - 6, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__CALC2(__reg_2_0, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 6, __reg_2_1, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__CALC2(__reg_2_1, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 6, __reg_2_2, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__CALC2(__reg_2_2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 6, __reg_2_3, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__CALC2(__reg_2_3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h - 6, __reg_2_4, __reg_2_0, __reg_2_1, __reg_2_2, __reg_2_3);
__h++;
}
}
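/* kernel0_2: two fused time steps per sweep (__side0Len == 2): one __CALC1
   stage plus the final application inside __STORE. */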
__global__ void kernel0_2(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 504;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
double __reg_1_0;
double __reg_1_1;
double __reg_1_2;
double __reg_1_3;
double __reg_1_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __CALC1(out, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__writeValid1) __CALCEXPR(out, reg0, reg1, reg2, reg3, reg4); else out = reg2; } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_1_0, 0);
__LOAD(__reg_1_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_1_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(3, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__LOAD(__reg_0_0, 5);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, 6);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, 7);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, 8);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
}
__c_sb = __c_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_0_2, __reg_0_3);
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_0_3, __reg_0_4);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_0_4, __reg_0_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_0_0, __reg_0_1);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_4, __h + 0);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__LOAD(__reg_0_0, __h + 1);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__LOAD(__reg_0_1, __h + 2);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 2, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__LOAD(__reg_0_2, __h + 3);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 1, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__STORE(__h + 0, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1);
__STORE(__h + 1, __reg_1_3, __reg_1_4, __reg_1_0, __reg_0_1, __reg_0_2);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__CALC1(__reg_1_2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__STORE(__h - 4, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__CALC1(__reg_1_3, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__STORE(__h - 4, __reg_1_4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__CALC1(__reg_1_4, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__STORE(__h - 4, __reg_1_0, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__CALC1(__reg_1_0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__STORE(__h - 4, __reg_1_1, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__CALC1(__reg_1_1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__STORE(__h - 4, __reg_1_2, __reg_1_3, __reg_1_4, __reg_1_0, __reg_1_1);
__h++;
}
}
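/* kernel0_1: the residual single-step kernel (__side0Len == 1); __STORE applies
   the stencil directly to the register window with no intermediate stage. */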
__global__ void kernel0_1(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 2 - 2);
const AN5D_TYPE __c1Pad = (2);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 2 - 2);
const AN5D_TYPE __c2Pad = (2);
#define __c2 c2
const AN5D_TYPE __halo1 = 2;
const AN5D_TYPE __halo2 = 2;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 508;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0_0;
double __reg_0_1;
double __reg_0_2;
double __reg_0_3;
double __reg_0_4;
__shared__ double __c_sb_double[__blockSize * 2];
double *__c_sb = __c_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR(__rn0, __a, __b, __c, __d, __e) do { __rn0 = (((((((((0.09371f * (__REGREF(__a, 0))) + (0.09374f * (__REGREF(__b, 0)))) + (0.09376f * (__SBREF(__c_sb, -2)))) + (0.09372f * (__SBREF(__c_sb, -1)))) + (0.25001f * (__REGREF(__c, 0)))) + (0.09377f * (__SBREF(__c_sb, 1)))) + (0.09373f * (__SBREF(__c_sb, 2)))) + (0.09375f * (__REGREF(__d, 0)))) + (0.09378f * (__REGREF(__e, 0)))); } while (0)
#define __DB_SWITCH() do { __c_sb = &__c_sb_double[(__c_sb == __c_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a, b, c, d, e) do { __DB_SWITCH(); __c_sb[__tid] = c; __syncthreads(); } while (0)
#define __STORE(h, reg0, reg1, reg2, reg3, reg4) do { __CALCSETUP(reg0, reg1, reg2, reg3, reg4); if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __CALCEXPR(__DEST, reg0, reg1, reg2, reg3, reg4); } } while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
else
{
__LOAD(__reg_0_0, 0);
__LOAD(__reg_0_1, 1);
__LOAD(__reg_0_2, 2);
__LOAD(__reg_0_3, 3);
__LOAD(__reg_0_4, 4);
__STORE(2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
}
__c_sb = __c_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 0 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0_0, __h + 0);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__LOAD(__reg_0_1, __h + 1);
__STORE(__h - 1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__LOAD(__reg_0_2, __h + 2);
__STORE(__h + 0, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__LOAD(__reg_0_3, __h + 3);
__STORE(__h + 1, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 5;)
{
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0_0, __h);
__STORE(__h - 2, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_1, __h);
__STORE(__h - 2, __reg_0_2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_2, __h);
__STORE(__h - 2, __reg_0_3, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_3, __h);
__STORE(__h - 2, __reg_0_4, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0_4, __h);
__STORE(__h - 2, __reg_0_0, __reg_0_1, __reg_0_2, __reg_0_3, __reg_0_4);
__h++;
}
}
// ---- Next file: 5aa2e83cc3b88dd485cad0fd5733b20f47784cd7.hip ----
#include "hip/hip_runtime.h"
// This file is partly based on Mehran Maghoumi's work: https://github.com/Maghoumi/culibrealsense
#ifdef RS2_USE_CUDA
#include "cuda-conversion.cuh"
#include <iostream>
#include <iomanip>
/*
// conversion to Y8 is currently not available in the API
__global__ void kernel_unpack_yuy2_y8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= superPixCount)
return;
int idx = i * 4;
dst[idx] = src[idx];
dst[idx + 1] = src[idx + 2];
dst[idx + 2] = src[idx + 4];
dst[idx + 3] = src[idx + 6];
dst[idx + 4] = src[idx + 8];
dst[idx + 5] = src[idx + 10];
dst[idx + 6] = src[idx + 12];
dst[idx + 7] = src[idx + 14];
dst[idx + 8] = src[idx + 16];
dst[idx + 9] = src[idx + 18];
dst[idx + 10] = src[idx + 20];
dst[idx + 11] = src[idx + 22];
dst[idx + 12] = src[idx + 24];
dst[idx + 13] = src[idx + 26];
dst[idx + 14] = src[idx + 28];
dst[idx + 15] = src[idx + 30];
}
*/
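// Expands the two luma samples of each YUY2 super-pixel into little-endian
// 16-bit values (chroma is dropped; the 8-bit luma lands in the high byte).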
__global__ void kernel_unpack_yuy2_y16_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
dst[idx] = 0;
dst[idx + 1] = src[idx + 0];
dst[idx + 2] = 0;
dst[idx + 3] = src[idx + 2];
}
}
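// YUY2 -> RGB8 using the standard fixed-point BT.601 expansion:
// R = 1.164(Y-16) + 1.596V', G = 1.164(Y-16) - 0.391U' - 0.813V',
// B = 1.164(Y-16) + 2.018U', with U' = U-128 and V' = V-128; coefficients are
// scaled by 256 and rounded via "+128 >> 8". The two luma samples of a
// super-pixel share one (U,V) pair.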
__global__ void kernel_unpack_yuy2_rgb8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
uint8_t y0 = src[idx];
uint8_t u0 = src[idx + 1];
uint8_t y1 = src[idx + 2];
uint8_t v0 = src[idx + 3];
int16_t c = y0 - 16;
int16_t d = u0 - 128;
int16_t e = v0 - 128;
int32_t t;
#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t)
int odx = i * 6;
dst[odx] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 1] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 2] = clamp((298 * c + 516 * d + 128) >> 8);
c = y1 - 16;
dst[odx + 3] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 4] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 5] = clamp((298 * c + 516 * d + 128) >> 8);
#undef clamp
}
}
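// Same conversion as the RGB8 kernel above with the byte order swapped to BGR.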
__global__ void kernel_unpack_yuy2_bgr8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
uint8_t y0 = src[idx];
uint8_t u0 = src[idx + 1];
uint8_t y1 = src[idx + 2];
uint8_t v0 = src[idx + 3];
int16_t c = y0 - 16;
int16_t d = u0 - 128;
int16_t e = v0 - 128;
int32_t t;
#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t)
int odx = i * 6;
dst[odx + 2] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 1] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx ] = clamp((298 * c + 516 * d + 128) >> 8);
c = y1 - 16;
dst[odx + 5] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 4] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 3] = clamp((298 * c + 516 * d + 128) >> 8);
#undef clamp
}
}
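// RGB8 conversion plus an opaque alpha byte (255) per output pixel.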
__global__ void kernel_unpack_yuy2_rgba8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
uint8_t y0 = src[idx];
uint8_t u0 = src[idx + 1];
uint8_t y1 = src[idx + 2];
uint8_t v0 = src[idx + 3];
int16_t c = y0 - 16;
int16_t d = u0 - 128;
int16_t e = v0 - 128;
int32_t t;
#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t)
int odx = i * 8;
dst[odx] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 1] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 2] = clamp((298 * c + 516 * d + 128) >> 8);
dst[odx + 3] = 255;
c = y1 - 16;
dst[odx + 4] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 5] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 6] = clamp((298 * c + 516 * d + 128) >> 8);
dst[odx + 7] = 255;
#undef clamp
}
}
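// BGR8 conversion plus an opaque alpha byte (255) per output pixel.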
__global__ void kernel_unpack_yuy2_bgra8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
uint8_t y0 = src[idx];
uint8_t u0 = src[idx + 1];
uint8_t y1 = src[idx + 2];
uint8_t v0 = src[idx + 3];
int16_t c = y0 - 16;
int16_t d = u0 - 128;
int16_t e = v0 - 128;
int32_t t;
#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t)
int odx = i * 8;
dst[odx + 3] = 255;
dst[odx + 2] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 1] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx ] = clamp((298 * c + 516 * d + 128) >> 8);
c = y1 - 16;
dst[odx + 7] = 255;
dst[odx + 6] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 5] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 4] = clamp((298 * c + 516 * d + 128) >> 8);
#undef clamp
}
}
void rscuda::unpack_yuy2_cuda_helper(const uint8_t* src, uint8_t* dst, int n, rs2_format format)
{
/* hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start); */
// How many super pixels do we have?
int superPix = n / 2;
uint8_t *devSrc = 0;
uint8_t *devDst = 0;
hipError_t result = hipMalloc(&devSrc, superPix * sizeof(uint8_t) * 4);
assert(result == hipSuccess);
result = hipMemcpy(devSrc, src, superPix * sizeof(uint8_t) * 4, hipMemcpyHostToDevice);
assert(result == hipSuccess);
int numBlocks = superPix / RS2_CUDA_THREADS_PER_BLOCK;
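// Note: integer division truncates; the kernels' grid-stride loops cover any
// remainder as long as at least one block is launched (superPix >= block size).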
int size;
switch (format)
{
// conversion to Y8 is currently not available in the API
/* case RS2_FORMAT_Y8:
size = 1;
result = hipMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == hipSuccess);
kernel_unpack_yuy2_y8_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, superPix);
break;
*/
case RS2_FORMAT_Y16:
size = 2;
result = hipMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_unpack_yuy2_y16_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devSrc, devDst, superPix);
break;
case RS2_FORMAT_RGB8:
size = 3;
result = hipMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_unpack_yuy2_rgb8_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devSrc, devDst, superPix);
break;
case RS2_FORMAT_BGR8:
size = 3;
result = hipMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_unpack_yuy2_bgr8_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devSrc, devDst, superPix);
break;
case RS2_FORMAT_RGBA8:
size = 4;
result = hipMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_unpack_yuy2_rgba8_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devSrc, devDst, superPix);
break;
case RS2_FORMAT_BGRA8:
size = 4;
result = hipMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_unpack_yuy2_bgra8_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devSrc, devDst, superPix);
break;
default:
assert(false);
}
result = hipGetLastError();
assert(result == hipSuccess);
result = hipMemcpy(dst, devDst, n * sizeof(uint8_t) * size, hipMemcpyDeviceToHost);
assert(result == hipSuccess);
hipFree(devSrc);
hipFree(devDst);
/* hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n"; */
}
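// Usage sketch (illustrative only - the buffer names below are assumptions, not
// part of this file): n is the pixel count, so a YUY2 frame occupies n*2 bytes
// (4 bytes per two-pixel super-pixel) and an RGB8 output n*3 bytes:
//   std::vector<uint8_t> yuy2(640 * 480 * 2); // filled from the sensor
//   std::vector<uint8_t> rgb(640 * 480 * 3);
//   rscuda::unpack_yuy2_cuda_helper(yuy2.data(), rgb.data(), 640 * 480, RS2_FORMAT_RGB8);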
__global__ void kernel_split_frame_y8_y8_from_y8i_cuda(uint8_t* a, uint8_t* b, int count, const rscuda::y8i_pixel * source)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= count)
return;
a[i] = source[i].l;
b[i] = source[i].r;
}
void rscuda::y8_y8_from_y8i_cuda_helper(uint8_t* const dest[], int count, const rscuda::y8i_pixel * source)
{
/* hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start); */
int numBlocks = (count + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK;
uint8_t* a = dest[0];
uint8_t* b = dest[1];
rscuda::y8i_pixel *devSrc = 0;
uint8_t *devDst1 = 0; // for dest[0]
uint8_t *devDst2 = 0; // for dest[1]
hipError_t result = hipMalloc(&devSrc, count * sizeof(rscuda::y8i_pixel));
assert(result == hipSuccess);
result = hipMemcpy(devSrc, source, count * sizeof(rscuda::y8i_pixel), hipMemcpyHostToDevice);
assert(result == hipSuccess);
result = hipMalloc(&devDst1, count * sizeof(uint8_t));
assert(result == hipSuccess);
result = hipMalloc(&devDst2, count * sizeof(uint8_t));
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_split_frame_y8_y8_from_y8i_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devDst1, devDst2, count, devSrc);
result = hipGetLastError();
assert(result == hipSuccess);
result = hipMemcpy(a, devDst1, count * sizeof(uint8_t), hipMemcpyDeviceToHost);
assert(result == hipSuccess);
result = hipMemcpy(b, devDst2, count * sizeof(uint8_t), hipMemcpyDeviceToHost);
assert(result == hipSuccess);
hipFree(devSrc);
hipFree(devDst1);
hipFree(devDst2);
/* hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << std::endl; */
}
__global__ void kernel_split_frame_y16_y16_from_y12i_cuda(uint16_t* a, uint16_t* b, int count, const rscuda::y12i_pixel * source)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= count)
return;
a[i] = source[i].l() << 6 | source[i].l() >> 4;
b[i] = source[i].r() << 6 | source[i].r() >> 4;
}
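// Note: l() and r() return 10-bit samples; v << 6 scales them to the 16-bit
// range and v >> 4 replicates the top bits into the low bits, so the mapping
// covers [0, 0xFFFF] exactly: 0x000 -> 0x0000 and 0x3FF -> 0xFFC0 | 0x3F = 0xFFFF.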
void rscuda::y16_y16_from_y12i_10_cuda_helper(uint8_t* const dest[], int count, const rscuda::y12i_pixel * source)
{
/*
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start); */
source = reinterpret_cast<const y12i_pixel*>(source);
int numBlocks = (count + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK;
uint16_t* a = reinterpret_cast<uint16_t*>(dest[0]);
uint16_t* b = reinterpret_cast<uint16_t*>(dest[1]);
rscuda::y12i_pixel *devSrc = 0;
uint16_t *devDst1 = 0; // for dest[0]
uint16_t *devDst2 = 0; // for dest[1]
hipError_t result = hipMalloc(&devSrc, count * sizeof(rscuda::y12i_pixel));
assert(result == hipSuccess);
result = hipMemcpy(devSrc, source, count * sizeof(rscuda::y12i_pixel), hipMemcpyHostToDevice);
assert(result == hipSuccess);
result = hipMalloc(&devDst1, count * sizeof(uint16_t));
assert(result == hipSuccess);
result = hipMalloc(&devDst2, count * sizeof(uint16_t));
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_split_frame_y16_y16_from_y12i_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devDst1, devDst2, count, devSrc);
result = hipGetLastError();
assert(result == hipSuccess);
result = hipMemcpy(a, devDst1, count * sizeof(uint16_t), hipMemcpyDeviceToHost);
assert(result == hipSuccess);
result = hipMemcpy(b, devDst2, count * sizeof(uint16_t), hipMemcpyDeviceToHost);
assert(result == hipSuccess);
hipFree(devSrc);
hipFree(devDst1);
hipFree(devDst2);
/*
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << std::endl;
*/
}
__global__ void kernel_z16_y8_from_sr300_inzi_cuda (const uint16_t* source, uint8_t* const dest, int count)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= count)
return;
dest[i] = source[i] >> 2;
}
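// Note: the >> 2 keeps the top 8 bits of the (presumably 10-bit) IR sample,
// e.g. 0x3FF -> 0xFF.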
void rscuda::unpack_z16_y8_from_sr300_inzi_cuda (uint8_t * const dest, const uint16_t * source, int count)
{
/* hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start); */
uint16_t *devSrc = 0;
uint8_t *devDst = 0;
int numBlocks = (count + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK;
hipError_t result = hipMalloc(&devSrc, count * sizeof(uint16_t));
assert(result == hipSuccess);
result = hipMemcpy(devSrc, source, count * sizeof(uint16_t), hipMemcpyHostToDevice);
assert(result == hipSuccess);
result = hipMalloc(&devDst, count * sizeof(uint8_t));
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_z16_y8_from_sr300_inzi_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devSrc, devDst, count);
result = hipMemcpy(dest, devDst, count * sizeof(uint8_t), hipMemcpyDeviceToHost);
assert(result == hipSuccess);
hipFree(devSrc);
hipFree(devDst);
/* hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << std::endl; */
}
__global__ void kernel_z16_y16_from_sr300_inzi_cuda (uint16_t* const source, uint16_t* const dest, int count)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= count)
return;
dest[i] = source[i] << 6;
}
void rscuda::unpack_z16_y16_from_sr300_inzi_cuda(uint16_t * const dest, const uint16_t * source, int count)
{
/* hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start); */
uint16_t *devSrc = 0;
uint16_t *devDst = 0;
int numBlocks = (count + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK;
hipError_t result = hipMalloc(&devSrc, count * sizeof(uint16_t));
assert(result == hipSuccess);
result = hipMemcpy(devSrc, source, count * sizeof(uint16_t), hipMemcpyHostToDevice);
assert(result == hipSuccess);
result = hipMalloc(&devDst, count * sizeof(uint16_t));
assert(result == hipSuccess);
hipLaunchKernelGGL(( kernel_z16_y16_from_sr300_inzi_cuda), dim3(numBlocks), dim3(RS2_CUDA_THREADS_PER_BLOCK), 0, 0, devSrc, devDst, count);
result = hipMemcpy(dest, devDst, count * sizeof(uint16_t), hipMemcpyDeviceToHost);
assert(result == hipSuccess);
hipFree(devSrc);
hipFree(devDst);
/* hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << std::endl; */
}
#endif
| 5aa2e83cc3b88dd485cad0fd5733b20f47784cd7.cu | //This file is partly based on Mehran Maghoumi's work: https://github.com/Maghoumi/culibrealsense
#ifdef RS2_USE_CUDA
#include "cuda-conversion.cuh"
#include <iostream>
#include <iomanip>
#include <cassert> // assert() is used throughout the helpers below
/*
// conversion to Y8 is currently not available in the API
__global__ void kernel_unpack_yuy2_y8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= superPixCount)
return;
int idx = i * 4;
dst[idx] = src[idx];
dst[idx + 1] = src[idx + 2];
dst[idx + 2] = src[idx + 4];
dst[idx + 3] = src[idx + 6];
dst[idx + 4] = src[idx + 8];
dst[idx + 5] = src[idx + 10];
dst[idx + 6] = src[idx + 12];
dst[idx + 7] = src[idx + 14];
dst[idx + 8] = src[idx + 16];
dst[idx + 9] = src[idx + 18];
dst[idx + 10] = src[idx + 20];
dst[idx + 11] = src[idx + 22];
dst[idx + 12] = src[idx + 24];
dst[idx + 13] = src[idx + 26];
dst[idx + 14] = src[idx + 28];
dst[idx + 15] = src[idx + 30];
}
*/
__global__ void kernel_unpack_yuy2_y16_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
dst[idx] = 0;
dst[idx + 1] = src[idx + 0];
dst[idx + 2] = 0;
dst[idx + 3] = src[idx + 2];
}
}
__global__ void kernel_unpack_yuy2_rgb8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
uint8_t y0 = src[idx];
uint8_t u0 = src[idx + 1];
uint8_t y1 = src[idx + 2];
uint8_t v0 = src[idx + 3];
int16_t c = y0 - 16;
int16_t d = u0 - 128;
int16_t e = v0 - 128;
int32_t t;
#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t)
int odx = i * 6;
dst[odx] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 1] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 2] = clamp((298 * c + 516 * d + 128) >> 8);
c = y1 - 16;
dst[odx + 3] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 4] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 5] = clamp((298 * c + 516 * d + 128) >> 8);
#undef clamp
}
}
__global__ void kernel_unpack_yuy2_bgr8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
uint8_t y0 = src[idx];
uint8_t u0 = src[idx + 1];
uint8_t y1 = src[idx + 2];
uint8_t v0 = src[idx + 3];
int16_t c = y0 - 16;
int16_t d = u0 - 128;
int16_t e = v0 - 128;
int32_t t;
#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t)
int odx = i * 6;
dst[odx + 2] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 1] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx ] = clamp((298 * c + 516 * d + 128) >> 8);
c = y1 - 16;
dst[odx + 5] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 4] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 3] = clamp((298 * c + 516 * d + 128) >> 8);
#undef clamp
}
}
__global__ void kernel_unpack_yuy2_rgba8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
uint8_t y0 = src[idx];
uint8_t u0 = src[idx + 1];
uint8_t y1 = src[idx + 2];
uint8_t v0 = src[idx + 3];
int16_t c = y0 - 16;
int16_t d = u0 - 128;
int16_t e = v0 - 128;
int32_t t;
#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t)
int odx = i * 8;
dst[odx] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 1] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 2] = clamp((298 * c + 516 * d + 128) >> 8);
dst[odx + 3] = 255;
c = y1 - 16;
dst[odx + 4] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 5] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 6] = clamp((298 * c + 516 * d + 128) >> 8);
dst[odx + 7] = 255;
#undef clamp
}
}
__global__ void kernel_unpack_yuy2_bgra8_cuda(const uint8_t * src, uint8_t *dst, int superPixCount)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
if (i >= superPixCount)
return;
for (; i < superPixCount; i += stride) {
int idx = i * 4;
uint8_t y0 = src[idx];
uint8_t u0 = src[idx + 1];
uint8_t y1 = src[idx + 2];
uint8_t v0 = src[idx + 3];
int16_t c = y0 - 16;
int16_t d = u0 - 128;
int16_t e = v0 - 128;
int32_t t;
#define clamp(x) ((t=(x)) > 255 ? 255 : t < 0 ? 0 : t)
int odx = i * 8;
dst[odx + 3] = 255;
dst[odx + 2] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 1] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx ] = clamp((298 * c + 516 * d + 128) >> 8);
c = y1 - 16;
dst[odx + 7] = 255;
dst[odx + 6] = clamp((298 * c + 409 * e + 128) >> 8);
dst[odx + 5] = clamp((298 * c - 100 * d - 208 * e + 128) >> 8);
dst[odx + 4] = clamp((298 * c + 516 * d + 128) >> 8);
#undef clamp
}
}
void rscuda::unpack_yuy2_cuda_helper(const uint8_t* src, uint8_t* dst, int n, rs2_format format)
{
/* cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start); */
// How many super pixels do we have?
int superPix = n / 2;
uint8_t *devSrc = 0;
uint8_t *devDst = 0;
cudaError_t result = cudaMalloc(&devSrc, superPix * sizeof(uint8_t) * 4);
assert(result == cudaSuccess);
result = cudaMemcpy(devSrc, src, superPix * sizeof(uint8_t) * 4, cudaMemcpyHostToDevice);
assert(result == cudaSuccess);
int numBlocks = (superPix + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK; // round up so a partial final block is still launched
int size;
switch (format)
{
// conversion to Y8 is currently not available in the API
/* case RS2_FORMAT_Y8:
size = 1;
result = cudaMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == cudaSuccess);
kernel_unpack_yuy2_y8_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, superPix);
break;
*/
case RS2_FORMAT_Y16:
size = 2;
result = cudaMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == cudaSuccess);
kernel_unpack_yuy2_y16_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, superPix);
break;
case RS2_FORMAT_RGB8:
size = 3;
result = cudaMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == cudaSuccess);
kernel_unpack_yuy2_rgb8_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, superPix);
break;
case RS2_FORMAT_BGR8:
size = 3;
result = cudaMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == cudaSuccess);
kernel_unpack_yuy2_bgr8_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, superPix);
break;
case RS2_FORMAT_RGBA8:
size = 4;
result = cudaMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == cudaSuccess);
kernel_unpack_yuy2_rgba8_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, superPix);
break;
case RS2_FORMAT_BGRA8:
size = 4;
result = cudaMalloc(&devDst, n * sizeof(uint8_t) * size);
assert(result == cudaSuccess);
kernel_unpack_yuy2_bgra8_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, superPix);
break;
default:
assert(false);
}
result = cudaGetLastError();
assert(result == cudaSuccess);
result = cudaMemcpy(dst, devDst, n * sizeof(uint8_t) * size, cudaMemcpyDeviceToHost);
assert(result == cudaSuccess);
cudaFree(devSrc);
cudaFree(devDst);
/* cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << "\n"; */
}
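// Note: assert() compiles to a no-op when NDEBUG is defined, so release builds
// would silently ignore CUDA failures here; a hardened variant might check
// result != cudaSuccess explicitly and report cudaGetErrorString(result).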
__global__ void kernel_split_frame_y8_y8_from_y8i_cuda(uint8_t* a, uint8_t* b, int count, const rscuda::y8i_pixel * source)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= count)
return;
a[i] = source[i].l;
b[i] = source[i].r;
}
void rscuda::y8_y8_from_y8i_cuda_helper(uint8_t* const dest[], int count, const rscuda::y8i_pixel * source)
{
/* cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start); */
int numBlocks = (count + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK;
uint8_t* a = dest[0];
uint8_t* b = dest[1];
rscuda::y8i_pixel *devSrc = 0;
uint8_t *devDst1 = 0; // for dest[0]
uint8_t *devDst2 = 0; // for dest[1]
cudaError_t result = cudaMalloc(&devSrc, count * sizeof(rscuda::y8i_pixel));
assert(result == cudaSuccess);
result = cudaMemcpy(devSrc, source, count * sizeof(rscuda::y8i_pixel), cudaMemcpyHostToDevice);
assert(result == cudaSuccess);
result = cudaMalloc(&devDst1, count * sizeof(uint8_t));
assert(result == cudaSuccess);
result = cudaMalloc(&devDst2, count * sizeof(uint8_t));
assert(result == cudaSuccess);
kernel_split_frame_y8_y8_from_y8i_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devDst1, devDst2, count, devSrc);
result = cudaGetLastError();
assert(result == cudaSuccess);
result = cudaMemcpy(a, devDst1, count * sizeof(uint8_t), cudaMemcpyDeviceToHost);
assert(result == cudaSuccess);
result = cudaMemcpy(b, devDst2, count * sizeof(uint8_t), cudaMemcpyDeviceToHost);
assert(result == cudaSuccess);
cudaFree(devSrc);
cudaFree(devDst1);
cudaFree(devDst2);
/* cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << std::endl; */
}
__global__ void kernel_split_frame_y16_y16_from_y12i_cuda(uint16_t* a, uint16_t* b, int count, const rscuda::y12i_pixel * source)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= count)
return;
a[i] = source[i].l() << 6 | source[i].l() >> 4;
b[i] = source[i].r() << 6 | source[i].r() >> 4;
}
void rscuda::y16_y16_from_y12i_10_cuda_helper(uint8_t* const dest[], int count, const rscuda::y12i_pixel * source)
{
/*
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start); */
source = reinterpret_cast<const y12i_pixel*>(source);
int numBlocks = (count + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK;
uint16_t* a = reinterpret_cast<uint16_t*>(dest[0]);
uint16_t* b = reinterpret_cast<uint16_t*>(dest[1]);
rscuda::y12i_pixel *devSrc = 0;
uint16_t *devDst1 = 0; // for dest[0]
uint16_t *devDst2 = 0; // for dest[1]
cudaError_t result = cudaMalloc(&devSrc, count * sizeof(rscuda::y12i_pixel));
assert(result == cudaSuccess);
result = cudaMemcpy(devSrc, source, count * sizeof(rscuda::y12i_pixel), cudaMemcpyHostToDevice);
assert(result == cudaSuccess);
result = cudaMalloc(&devDst1, count * sizeof(uint16_t));
assert(result == cudaSuccess);
result = cudaMalloc(&devDst2, count * sizeof(uint16_t));
assert(result == cudaSuccess);
kernel_split_frame_y16_y16_from_y12i_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devDst1, devDst2, count, devSrc);
result = cudaGetLastError();
assert(result == cudaSuccess);
result = cudaMemcpy(a, devDst1, count * sizeof(uint16_t), cudaMemcpyDeviceToHost);
assert(result == cudaSuccess);
result = cudaMemcpy(b, devDst2, count * sizeof(uint16_t), cudaMemcpyDeviceToHost);
assert(result == cudaSuccess);
cudaFree(devSrc);
cudaFree(devDst1);
cudaFree(devDst2);
/*
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << std::endl;
*/
}
__global__ void kernel_z16_y8_from_sr300_inzi_cuda (const uint16_t* source, uint8_t* const dest, int count)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= count)
return;
dest[i] = source[i] >> 2;
}
void rscuda::unpack_z16_y8_from_sr300_inzi_cuda (uint8_t * const dest, const uint16_t * source, int count)
{
/* cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start); */
uint16_t *devSrc = 0;
uint8_t *devDst = 0;
int numBlocks = (count + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK;
cudaError_t result = cudaMalloc(&devSrc, count * sizeof(uint16_t));
assert(result == cudaSuccess);
result = cudaMemcpy(devSrc, source, count * sizeof(uint16_t), cudaMemcpyHostToDevice);
assert(result == cudaSuccess);
result = cudaMalloc(&devDst, count * sizeof(uint8_t));
assert(result == cudaSuccess);
kernel_z16_y8_from_sr300_inzi_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, count);
result = cudaMemcpy(dest, devDst, count * sizeof(uint8_t), cudaMemcpyDeviceToHost);
assert(result == cudaSuccess);
cudaFree(devSrc);
cudaFree(devDst);
/* cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << std::endl; */
}
__global__ void kernel_z16_y16_from_sr300_inzi_cuda (uint16_t* const source, uint16_t* const dest, int count)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= count)
return;
dest[i] = source[i] << 6;
}
void rscuda::unpack_z16_y16_from_sr300_inzi_cuda(uint16_t * const dest, const uint16_t * source, int count)
{
/* cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start); */
uint16_t *devSrc = 0;
uint16_t *devDst = 0;
int numBlocks = (count + RS2_CUDA_THREADS_PER_BLOCK - 1) / RS2_CUDA_THREADS_PER_BLOCK;
cudaError_t result = cudaMalloc(&devSrc, count * sizeof(uint16_t));
assert(result == cudaSuccess);
result = cudaMemcpy(devSrc, source, count * sizeof(uint16_t), cudaMemcpyHostToDevice);
assert(result == cudaSuccess);
result = cudaMalloc(&devDst, count * sizeof(uint16_t));
assert(result == cudaSuccess);
kernel_z16_y16_from_sr300_inzi_cuda<<<numBlocks, RS2_CUDA_THREADS_PER_BLOCK>>>(devSrc, devDst, count);
result = cudaMemcpy(dest, devDst, count * sizeof(uint16_t), cudaMemcpyDeviceToHost);
assert(result == cudaSuccess);
cudaFree(devSrc);
cudaFree(devDst);
/* cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
std::cout << milliseconds << std::endl; */
}
#endif
|
243c9cabb74d6abea6f20b26e45024335e67a8de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*** Calculating a derivative with CD ***/
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
#include <cstdio> // fprintf
void checkErrors(const char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
hipError_t err;
err = hipDeviceSynchronize();
if (err != hipSuccess)
{
const char *e = hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = hipGetLastError();
if (err != hipSuccess)
{
const char *e = hipGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
double get_time()
{ struct timeval tim;
hipDeviceSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec+(tim.tv_usec/1000000.0);
}
__global__ void copy_array(float *u, float *u_prev, int N, int BSZ)
{
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
u_prev[I] = u[I];
}
// GPU kernel
__global__ void update (float *u, float *u_prev, int N, float h, float dt, float alpha, int BSZ)
{
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
// if not boundary do
if ( (I>N) && (I< N*N-1-N) && (I%N!=0) && (I%N!=N-1))
{ u[I] = u_prev[I] + alpha*dt/(h*h) * (u_prev[I+1] + u_prev[I-1] + u_prev[I+N] + u_prev[I-N] - 4*u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
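// Note: this is an explicit (forward Euler) step for the 2D heat equation; it
// is stable only for dt <= h*h/(4*alpha). With the values used in main() below
// (N=128, h=3.5/127 ~ 0.0276, alpha=0.645) that bound is ~2.9e-4, so the chosen
// dt = 1e-5 is comfortably inside the stability region.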
int main()
{
// Allocate in CPU
int N = 128;
int BLOCKSIZE = 16;
hipSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax-xmin)/(N-1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = ceil(time/dt);
int I;
float *x = new float[N*N];
float *y = new float[N*N];
float *u = new float[N*N];
float *u_prev = new float[N*N];
// Generate mesh and initial condition
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
x[I] = xmin + h*i;
y[I] = ymin + h*j;
u[I] = 0.0f;
if ( (i==0) || (j==0))
{u[I] = 200.0f;}
}
}
// Allocate in GPU
float *u_d, *u_prev_d;
hipMalloc( (void**) &u_d, N*N*sizeof(float));
hipMalloc( (void**) &u_prev_d, N*N*sizeof(float));
// Copy to GPU
hipMemcpy(u_d, u, N*N*sizeof(float), hipMemcpyHostToDevice);
// Loop
dim3 dimGrid(int((N-0.5)/BLOCKSIZE)+1, int((N-0.5)/BLOCKSIZE)+1);
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
double start = get_time();
for (int t=0; t<steps; t++)
{hipLaunchKernelGGL(( copy_array) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, BLOCKSIZE);
hipLaunchKernelGGL(( update) , dim3(dimGrid), dim3(dimBlock), 0, 0, u_d, u_prev_d, N, h, dt, alpha, BLOCKSIZE);
}
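// Kernel launches are asynchronous, but the timing below is still valid:
// get_time() calls hipDeviceSynchronize() before reading the clock, so 'stop'
// is taken only after every queued copy_array/update step has finished.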
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
std::cout<<"time = "<<elapsed<<std::endl;
// Copy result back to host
hipMemcpy(u, u_d, N*N*sizeof(float), hipMemcpyDeviceToHost);
std::ofstream temperature("temperature_global.txt");
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
// std::cout<<u[I]<<"\t";
temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[I]<<std::endl;
}
temperature<<"\n";
//std::cout<<std::endl;
}
temperature.close();
// Free device
hipFree(u_d);
hipFree(u_prev_d);
}
| 243c9cabb74d6abea6f20b26e45024335e67a8de.cu | /*** Calculating a derivative with CD ***/
#include <iostream>
#include <fstream>
#include <cmath>
#include <sys/time.h>
#include <cstdio> // fprintf
void checkErrors(const char *label)
{
// we need to synchronise first to catch errors due to
// asynchronous operations that would otherwise
// potentially go unnoticed
cudaError_t err;
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
{
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
err = cudaGetLastError();
if (err != cudaSuccess)
{
const char *e = cudaGetErrorString(err);
fprintf(stderr, "CUDA Error: %s (at %s)\n", e, label);
}
}
double get_time()
{ struct timeval tim;
cudaDeviceSynchronize();
gettimeofday(&tim, NULL);
return (double) tim.tv_sec+(tim.tv_usec/1000000.0);
}
__global__ void copy_array(float *u, float *u_prev, int N, int BSZ)
{
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
u_prev[I] = u[I];
}
// GPU kernel
__global__ void update (float *u, float *u_prev, int N, float h, float dt, float alpha, int BSZ)
{
// Setting up indices
int i = threadIdx.x;
int j = threadIdx.y;
int I = blockIdx.y*BSZ*N + blockIdx.x*BSZ + j*N + i;
if (I>=N*N){return;}
// if not boundary do
if ( (I>N) && (I< N*N-1-N) && (I%N!=0) && (I%N!=N-1))
{ u[I] = u_prev[I] + alpha*dt/(h*h) * (u_prev[I+1] + u_prev[I-1] + u_prev[I+N] + u_prev[I-N] - 4*u_prev[I]);
}
// Boundary conditions are automatically imposed
// as we don't touch boundaries
}
int main()
{
// Allocate in CPU
int N = 128;
int BLOCKSIZE = 16;
cudaSetDevice(0);
float xmin = 0.0f;
float xmax = 3.5f;
float ymin = 0.0f;
//float ymax = 2.0f;
float h = (xmax-xmin)/(N-1);
float dt = 0.00001f;
float alpha = 0.645f;
float time = 0.4f;
int steps = ceil(time/dt);
int I;
float *x = new float[N*N];
float *y = new float[N*N];
float *u = new float[N*N];
float *u_prev = new float[N*N];
// Generate mesh and initial condition
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
x[I] = xmin + h*i;
y[I] = ymin + h*j;
u[I] = 0.0f;
if ( (i==0) || (j==0))
{u[I] = 200.0f;}
}
}
// Allocate in GPU
float *u_d, *u_prev_d;
cudaMalloc( (void**) &u_d, N*N*sizeof(float));
cudaMalloc( (void**) &u_prev_d, N*N*sizeof(float));
// Copy to GPU
cudaMemcpy(u_d, u, N*N*sizeof(float), cudaMemcpyHostToDevice);
// Loop
dim3 dimGrid(int((N-0.5)/BLOCKSIZE)+1, int((N-0.5)/BLOCKSIZE)+1);
dim3 dimBlock(BLOCKSIZE, BLOCKSIZE);
double start = get_time();
for (int t=0; t<steps; t++)
{ copy_array <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N, BLOCKSIZE);
update <<<dimGrid, dimBlock>>> (u_d, u_prev_d, N, h, dt, alpha, BLOCKSIZE);
}
double stop = get_time();
checkErrors("update");
double elapsed = stop - start;
std::cout<<"time = "<<elapsed<<std::endl;
// Copy result back to host
cudaMemcpy(u, u_d, N*N*sizeof(float), cudaMemcpyDeviceToHost);
std::ofstream temperature("temperature_global.txt");
for (int j=0; j<N; j++)
{ for (int i=0; i<N; i++)
{ I = N*j + i;
// std::cout<<u[I]<<"\t";
temperature<<x[I]<<"\t"<<y[I]<<"\t"<<u[I]<<std::endl;
}
temperature<<"\n";
//std::cout<<std::endl;
}
temperature.close();
// Free device
cudaFree(u_d);
cudaFree(u_prev_d);
}
|
2f9fb5f1e4ed9147be1bc30ba9bd15275bc2134b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2020 insaneyilin All Rights Reserved.
*
*
*/
#include "../common/common.h"
#include "../common/ocv_image.h"
#define DIM 1024
#define PI 3.1415926535897932f
struct DataBlock {
unsigned char *dev_bitmap;
OCVImage *bitmap;
};
// clean up memory allocated on the GPU
void CleanUp(DataBlock *d) {
CHECK_CUDA_ERROR(hipFree(d->dev_bitmap));
}
__global__ void Kernel(unsigned char *ptr, int ticks) {
// map (blockIdx, threadIdx) to pixel position
// each thread processes one pixel
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x; // 1d offset
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy); // distance to image center
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/10.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
}
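// Note: the intensity is a radial cosine, 128 + 127*cos(d/10 - ticks/7)/(d/10 + 1):
// concentric rings spaced roughly 2*pi*10 ~ 63 pixels apart whose amplitude
// decays away from the image centre; incrementing 'ticks' advances the phase,
// making the rings appear to travel outward.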
int main(int argc, char **argv) {
DataBlock data;
OCVImage bitmap(DIM, DIM);
data.bitmap = &bitmap;
CHECK_CUDA_ERROR(hipMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
// "size" of the Grid, there are DIM/16 x DIM/16 blocks in the grid
dim3 blocks(DIM / 16, DIM / 16);
// "size" of each Block, there are 16 x 16 threads in each block
dim3 threads(16, 16);
int ticks = 0;
bitmap.show("ripple", 30);
// show ripple animation
while (1) {
hipLaunchKernelGGL(( Kernel), dim3(blocks), dim3(threads), 0, 0, data.dev_bitmap, ticks);
CHECK_CUDA_ERROR(hipMemcpy(data.bitmap->get_ptr(),
data.dev_bitmap,
data.bitmap->image_size(),
hipMemcpyDeviceToHost));
++ticks;
char key = bitmap.show("ripple", 30);
if (key == 27) {
break;
}
}
CleanUp(&data);
return 0;
}
| 2f9fb5f1e4ed9147be1bc30ba9bd15275bc2134b.cu | /*
* Copyright 2020 insaneyilin All Rights Reserved.
*
*
*/
#include "../common/common.h"
#include "../common/ocv_image.h"
#define DIM 1024
#define PI 3.1415926535897932f
struct DataBlock {
unsigned char *dev_bitmap;
OCVImage *bitmap;
};
// clean up memory allocated on the GPU
void CleanUp(DataBlock *d) {
CHECK_CUDA_ERROR(cudaFree(d->dev_bitmap));
}
__global__ void Kernel(unsigned char *ptr, int ticks) {
// map (blockIdx, threadIdx) to pixel position
// each thread processes one pixel
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x; // 1d offset
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy); // distance to image center
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/10.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
}
int main(int argc, char **argv) {
DataBlock data;
OCVImage bitmap(DIM, DIM);
data.bitmap = &bitmap;
CHECK_CUDA_ERROR(cudaMalloc((void**)&data.dev_bitmap, bitmap.image_size()));
// "size" of the Grid, there are DIM/16 x DIM/16 blocks in the grid
dim3 blocks(DIM / 16, DIM / 16);
// "size" of each Block, there are 16 x 16 threads in each block
dim3 threads(16, 16);
int ticks = 0;
bitmap.show("ripple", 30);
// show ripple animation
while (1) {
Kernel<<<blocks, threads>>>(data.dev_bitmap, ticks);
CHECK_CUDA_ERROR(cudaMemcpy(data.bitmap->get_ptr(),
data.dev_bitmap,
data.bitmap->image_size(),
cudaMemcpyDeviceToHost));
++ticks;
char key = bitmap.show("ripple", 30);
if (key == 27) {
break;
}
}
CleanUp(&data);
return 0;
}
|
3e2b3a2d245c6a26d39e9417bd5e3c7d0e1d9b24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//CUDA includes
#include <hipfft.h>
#include <cutil.h>
#include <rocblas.h>
typedef unsigned int uint;
typedef hipfftComplex Complex;
enum WindowType{HAMMING, HANNING, TUKEY};
#define BLOCK_DIM 16
#define BLOCK_SIZE 16
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) CUT_BANK_CHECKER(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) CUT_BANK_CHECKER(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
//***********************************************************************************
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
//***********************************************************************************
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
static __device__ __host__ inline Complex ComplexConj(Complex a)
{
Complex b;
b.x = a.x;
b.y = -a.y;
return b;
}
static __device__ __host__ inline Complex ComplexMulConj(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * (-b.y);
c.y = a.x * (-b.y) + a.y * b.x;
return c;
}
static __device__ __host__ inline void ComplexMulConjVector(Complex* a, Complex* b, Complex* c, long size)
{
for(long q = 0; q < size; q++) {
c[q].x = a[q].x * b[q].x - a[q].y * (-b[q].y);
c[q].y = a[q].x * (-b[q].y) + a[q].y * b[q].x;
}
}
//***********************************************************************************
__global__ void ComplexConjugate(Complex* a, int size)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for(int i = threadID; i < size; i += numThreads)
{
a[i] = ComplexConj(a[i]);
}
}
//***********************************************************************************
__global__ void DecimateSignal1D(const Complex* orig_signal, Complex* new_signal, int signal_size, int desired_size) {
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float inc = (float)signal_size / (float)desired_size;
float r = 0.0, portion;
Complex result, v1, v2;
if(threadID < desired_size) {
//find the calculated position in the original array for this new value
r = inc * threadID;
portion = ceil(r) - r;
//find the two closest real values
v1 = orig_signal[(int)floor(r)];
v2 = orig_signal[(int)ceil(r)];
if(v1.x == v2.x && v1.y == v2.y) {
new_signal[threadID] = v1;
}
else {
//interpolate the desired value from the two closest real values
result.x = v1.x * (1.0f - portion) + v2.x * portion;
result.y = v1.y * (1.0f - portion) + v2.y * portion;
//assign this value into the desired place in the array
new_signal[threadID] = result;
}
}
}
//***********************************************************************************
__global__ void ThresholdSignal1D(const Complex* orig_signal, Complex* new_signal, int signal_size, float percent_of_max, Complex* threshold) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
//create a new signal that has been properly thresholded
for(int i = threadID; i < signal_size; i += numThreads) {
//initialize everything in the new signal to be 0
new_signal[i].x = 0.0;
new_signal[i].y = 0.0;
//test if the matching value in the old signal is above or equal to the threshold
if(orig_signal[i].x >= threshold->x && orig_signal[i].y >= threshold->y) {
new_signal[i].x = orig_signal[i].x;
new_signal[i].y = orig_signal[i].y;
}
}
}
//***********************************************************************************
__global__ void GenerateWindowHamming(Complex* coeff, uint coeff_size) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const int M = coeff_size;
const float PI = 3.14159265358979;
//generate the desired number of Hamming coefficients
for(int i = threadID; i < coeff_size; i += numThreads)
{
coeff[i].x = 0.53836 - 0.46164 * __cosf((2.0 * PI * i) /(M-1));
coeff[i].y = 0.0;
}
}
//***********************************************************************************
__global__ void GenerateWindowHanning(Complex* coeff, uint coeff_size) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const int M = coeff_size;
const float PI = 3.14159265358979;
//generate the desired number of Hanning coefficients
for(int i = threadID; i < coeff_size; i += numThreads)
{
coeff[i].x = 0.5 * (1.0 - __cosf((2.0 * PI * i) / (M - 1)));
coeff[i].y = 0.0;
}
}
//***********************************************************************************
__global__ void GenerateWindowTukey(Complex* coeff, uint coeff_size, float alpha) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const int M = coeff_size;
const float PI = 3.14159265358979;
//generate the desired number of Tukey coefficients
for(int i = threadID; i < coeff_size; i += numThreads)
{
float numerator = i - (1.0 + alpha) * (M - 1.0)/2.0;
float denominator = (1.0 - alpha) * (M - 1.0)/2.0;
coeff[i].x = 0.5 * (1.0 + __cosf((numerator / denominator) * PI));
coeff[i].y = 0.0;
}
}
//***********************************************************************************
__global__ void FAM_MakeWindow(Complex* window, const Complex* coeff, int Np) {
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if(threadID < Np) {
//place this thread's window coefficient on the diagonal; multiplying the data by this diagonal matrix applies the window to each complex demodulate
window[threadID * Np + threadID] = coeff[threadID];
}
}
//***********************************************************************************
__global__ void FAM_DownConvert(Complex* complex_demod, int Np, int P, int L) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
float PI = 3.1415926535897932;
if(i < P && j < Np) {
float m = (float)i;
float k = j - Np/2.0;
Complex temp;
temp.x = 0;
temp.y = -(2.0*PI*k*m*L)/Np;
//compute the complex exponential value that will be multiplied
//against a specific element in the channelized signal matrix
Complex result;
result.x = exp(temp.x) * cos(temp.y);
result.y = exp(temp.x) * sin(temp.y);
//multiply the exponential value by the specific value in the signal matrix
complex_demod[i * Np + j] = ComplexMul(complex_demod[i * Np + j], result);
}
}
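// Note: temp.x is always 0, so exp(temp.x) == 1 and 'result' is exactly
// (cos(theta), sin(theta)) with theta = -2*pi*k*m*L/Np; each channel k of the
// channelized signal is multiplied by this complex exponential to shift it to
// baseband - the down-conversion step of the FFT accumulation method (FAM).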
//***********************************************************************************
extern __shared__ Complex buffer[];
__global__ void FAM_ComputeProdSeq(Complex* prod_seq, Complex* complex_demod, long Np, long P) {
long row = blockIdx.x;
long col = threadIdx.x;
Complex* row1 = complex_demod + row*P;
Complex* row2 = complex_demod + col*P;
Complex* result1 = prod_seq + (row*Np+col)*P;
Complex* result2 = prod_seq + (col*Np+row)*P;
Complex* block = (Complex*)buffer;
//each block of threads processes a common row, load this row into shared memory
//each thread will load one element of this row into shared memory. This assumes
//that Np is greater than P
if(col < P)
block[col] = row1[col];
__syncthreads();
//now perform the computation of the product sequence using the same row1 for all threads
//but a different row2 and result for each thread
ComplexMulConjVector(block, row2, result1, P);
}
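// Note: only result1 is ever written; assuming the kernel is launched with Np
// blocks of Np threads, the (col,row) entry is produced by the launch pair with
// the roles swapped, so the unused result2 pointer above is effectively dead code.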
//***********************************************************************************
__global__ void FAM_FFTShift_Horizontal(Complex* complex_demod, long Np, long P) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < P/2 && j < Np) {
Complex temp = complex_demod[i * Np + j];
complex_demod[i * Np + j] = complex_demod[(i+P/2) * Np + j];
complex_demod[(i+P/2) * Np + j] = temp;
}
}
//***********************************************************************************
__global__ void FAM_FFTShift_Vertical(Complex* complex_demod, long Np, long P) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < P && j < Np/2) {
Complex temp = complex_demod[i * Np + j];
complex_demod[i * Np + j] = complex_demod[i * Np + j + Np/2];
complex_demod[i * Np + j + Np/2] = temp;
}
}
//***********************************************************************************
__global__ void FAM_ComputeSCDFunction(float* Sx, Complex* prod_seq, long N, long Np, long P, long L) {
long k1 = blockIdx.x * blockDim.x + threadIdx.x;
long k2 = blockIdx.y * blockDim.y + threadIdx.y;
float l,k,p,alpha,f;
long kk, ll;
k1 = k1 + P/4;
if(k2 % Np == 0)
l = Np/2.0-1.0;
else
l = (k2%Np) - Np/2.0 - 1.0;
k = ceil((float)k2/(float)Np) - Np/2.0 - 1.0;
p = k1 - P/4 - 1.0;
alpha = (k-l)/Np + (p-1)/L/P;
f = (k+l) / 2.0 / Np;
if(alpha >= -1 && alpha <= 1 && f >= -.5 && f <= .5) {
kk = ceil(1 + Np * (f + .5));
ll = 1 + N * (alpha + 1);
Complex temp = prod_seq[k2*P + (k1+P/4)];
Sx[ll*Np + kk] = sqrt(temp.x * temp.x + temp.y * temp.y);
}
}
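// Note: Sx holds the magnitude of the spectral correlation density on the
// bi-frequency grid, with alpha the cycle frequency and f the spectral
// frequency; only points inside the region |alpha| <= 1, |f| <= 0.5 exist,
// which is why the bounds test above gates the write.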
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
__global__ void matrixMul(Complex* C, Complex* A, Complex* B, long wA, long wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
long aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
long aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
long aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
long bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
long bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
Complex Csub;
Csub.x = 0; Csub.y = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ Complex As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ Complex Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub = ComplexAdd(Csub, ComplexMul(AS(ty,k), BS(k,tx)));
//Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
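// Note: like the CUDA SDK matrixMul sample this kernel appears to be based on,
// it assumes the matrix dimensions are multiples of BLOCK_SIZE and expects a
// launch of dim3 grid(wB / BLOCK_SIZE, hA / BLOCK_SIZE) with
// dim3 threads(BLOCK_SIZE, BLOCK_SIZE).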
// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory. This kernel is up to 11x faster
// than the naive kernel below. Note that the shared memory array is sized to
// (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory
// so that bank conflicts do not occur when threads address the array column-wise.
__global__ void transpose(Complex *odata, Complex *idata, int width, int height)
{
__shared__ Complex block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
} | 3e2b3a2d245c6a26d39e9417bd5e3c7d0e1d9b24.cu | //CUDA includes
#include <cufft.h>
#include <cutil.h>
#include <cublas.h>
typedef unsigned int uint;
typedef cufftComplex Complex;
enum WindowType{HAMMING, HANNING, TUKEY};
#define BLOCK_DIM 16
#define BLOCK_SIZE 16
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) CUT_BANK_CHECKER(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) CUT_BANK_CHECKER(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
//***********************************************************************************
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
//***********************************************************************************
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
static __device__ __host__ inline Complex ComplexConj(Complex a)
{
Complex b;
b.x = a.x;
b.y = -a.y;
return b;
}
static __device__ __host__ inline Complex ComplexMulConj(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * (-b.y);
c.y = a.x * (-b.y) + a.y * b.x;
return c;
}
static __device__ __host__ inline void ComplexMulConjVector(Complex* a, Complex* b, Complex* c, long size)
{
for(long q = 0; q < size; q++) {
c[q].x = a[q].x * b[q].x - a[q].y * (-b[q].y);
c[q].y = a[q].x * (-b[q].y) + a[q].y * b[q].x;
}
}
//***********************************************************************************
__global__ void ComplexConjugate(Complex* a, int size)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for(int i = threadID; i < size; i += numThreads)
{
a[i] = ComplexConj(a[i]);
}
}
//***********************************************************************************
__global__ void DecimateSignal1D(const Complex* orig_signal, Complex* new_signal, int signal_size, int desired_size) {
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
float inc = (float)signal_size / (float)desired_size;
float r = 0.0, portion;
Complex result, v1, v2;
if(threadID < desired_size) {
//find the calculated position in the original array for this new value
r = inc * threadID;
portion = ceil(r) - r;
//find the two closest real values
v1 = orig_signal[(int)floor(r)];
v2 = orig_signal[(int)ceil(r)];
if(v1.x == v2.x && v1.y == v2.y) {
new_signal[threadID] = v1;
}
else {
//interpolate the desired value from the two closest real values
result.x = v1.x * (1.0f - portion) + v2.x * portion;
result.y = v1.y * (1.0f - portion) + v2.y * portion;
//assign this value into the desired place in the array
new_signal[threadID] = result;
}
}
}
//***********************************************************************************
__global__ void ThresholdSignal1D(const Complex* orig_signal, Complex* new_signal, int signal_size, float percent_of_max, Complex* threshold) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
//create a new signal that has been properly thresholded
for(int i = threadID; i < signal_size; i += numThreads) {
//initialize everything in the new signal to be 0
new_signal[i].x = 0.0;
new_signal[i].y = 0.0;
//test if the matching value in the old signal is above or equal to the threshold
if(orig_signal[i].x >= threshold->x && orig_signal[i].y >= threshold->y) {
new_signal[i].x = orig_signal[i].x;
new_signal[i].y = orig_signal[i].y;
}
}
}
//***********************************************************************************
__global__ void GenerateWindowHamming(Complex* coeff, uint coeff_size) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const int M = coeff_size;
const float PI = 3.14159265358979;
//generate the desired number of Hamming coefficients
for(int i = threadID; i < coeff_size; i += numThreads)
{
coeff[i].x = 0.53836 - 0.46164 * __cosf((2.0 * PI * i) /(M-1));
coeff[i].y = 0.0;
}
}
//***********************************************************************************
__global__ void GenerateWindowHanning(Complex* coeff, uint coeff_size) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const int M = coeff_size;
const float PI = 3.14159265358979;
//generate the desired number of Hanning coefficients
for(int i = threadID; i < coeff_size; i += numThreads)
{
coeff[i].x = 0.5 * (1.0 - __cosf((2.0 * PI * i) / (M - 1)));
coeff[i].y = 0.0;
}
}
//***********************************************************************************
__global__ void GenerateWindowTukey(Complex* coeff, uint coeff_size, float alpha) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
const int M = coeff_size;
const float PI = 3.14159265358979;
//generate the desired number of Tukey coefficients
for(int i = threadID; i < coeff_size; i += numThreads)
{
float numerator = i - (1.0 + alpha) * (M - 1.0)/2.0;
float denominator = (1.0 - alpha) * (M - 1.0)/2.0;
coeff[i].x = 0.5 * (1.0 + __cosf((numerator / denominator) * PI));
coeff[i].y = 0.0;
}
}
//***********************************************************************************
__global__ void FAM_MakeWindow(Complex* window, const Complex* coeff, int Np) {
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if(threadID < Np) {
//place this thread's window coefficient on the diagonal; multiplying the data by this diagonal matrix applies the window to each complex demodulate
window[threadID * Np + threadID] = coeff[threadID];
}
}
//***********************************************************************************
__global__ void FAM_DownConvert(Complex* complex_demod, int Np, int P, int L) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
float PI = 3.1415926535897932;
if(i < P && j < Np) {
float m = (float)i;
float k = j - Np/2.0;
Complex temp;
temp.x = 0;
temp.y = -(2.0*PI*k*m*L)/Np;
//compute the complex exponential value that will be multiplied
//against a specific element in the channelized signal matrix
Complex result;
result.x = exp(temp.x) * cos(temp.y);
result.y = exp(temp.x) * sin(temp.y);
//multiply the exponential value by the specific value in the signal matrix
complex_demod[i * Np + j] = ComplexMul(complex_demod[i * Np + j], result);
}
}
//***********************************************************************************
extern __shared__ Complex buffer[];
__global__ void FAM_ComputeProdSeq(Complex* prod_seq, Complex* complex_demod, long Np, long P) {
long row = blockIdx.x;
long col = threadIdx.x;
Complex* row1 = complex_demod + row*P;
Complex* row2 = complex_demod + col*P;
Complex* result1 = prod_seq + (row*Np+col)*P;
Complex* result2 = prod_seq + (col*Np+row)*P;
Complex* block = (Complex*)buffer;
//each block of threads processes a common row, load this row into shared memory
//each thread will load one element of this row into shared memory. This assumes
//that Np is greater than P
if(col < P)
block[col] = row1[col];
__syncthreads();
//now perform the computation of the product sequence using the same row1 for all threads
//but a different row2 and result for each thread
ComplexMulConjVector(block, row2, result1, P);
}
//***********************************************************************************
__global__ void FAM_FFTShift_Horizontal(Complex* complex_demod, long Np, long P) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < P/2 && j < Np) {
Complex temp = complex_demod[i * Np + j];
complex_demod[i * Np + j] = complex_demod[(i+P/2) * Np + j];
complex_demod[(i+P/2) * Np + j] = temp;
}
}
//***********************************************************************************
__global__ void FAM_FFTShift_Vertical(Complex* complex_demod, long Np, long P) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < P && j < Np/2) {
Complex temp = complex_demod[i * Np + j];
complex_demod[i * Np + j] = complex_demod[i * Np + j + Np/2];
complex_demod[i * Np + j + Np/2] = temp;
}
}
//***********************************************************************************
__global__ void FAM_ComputeSCDFunction(float* Sx, Complex* prod_seq, long N, long Np, long P, long L) {
long k1 = blockIdx.x * blockDim.x + threadIdx.x;
long k2 = blockIdx.y * blockDim.y + threadIdx.y;
float l,k,p,alpha,f;
long kk, ll;
k1 = k1 + P/4;
if(k2 % Np == 0)
l = Np/2.0-1.0;
else
l = (k2%Np) - Np/2.0 - 1.0;
k = ceil((float)k2/(float)Np) - Np/2.0 - 1.0;
p = k1 - P/4 - 1.0;
alpha = (k-l)/Np + (p-1)/L/P;
f = (k+l) / 2.0 / Np;
if(alpha >= -1 && alpha <= 1 && f >= -.5 && f <= .5) {
kk = ceil(1 + Np * (f + .5));
ll = 1 + N * (alpha + 1);
Complex temp = prod_seq[k2*P + (k1+P/4)];
Sx[ll*Np + kk] = sqrt(temp.x * temp.x + temp.y * temp.y);
}
}
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
__global__ void matrixMul(Complex* C, Complex* A, Complex* B, long wA, long wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
long aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
long aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
long aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
long bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
long bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
Complex Csub;
Csub.x = 0; Csub.y = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ Complex As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ Complex Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
Csub = ComplexAdd(Csub, ComplexMul(AS(ty,k), BS(k,tx)));
//Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
// This kernel is optimized to ensure all global reads and writes are coalesced,
// and to avoid bank conflicts in shared memory. This kernel is up to 11x faster
// than the naive kernel below. Note that the shared memory array is sized to
// (BLOCK_DIM+1)*BLOCK_DIM. This pads each row of the 2D block in shared memory
// so that bank conflicts do not occur when threads address the array column-wise.
__global__ void transpose(Complex *odata, Complex *idata, int width, int height)
{
__shared__ Complex block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
} |
956ac7d48812c2586a956455498f971c4864fe6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//______________________________________________________________//
// functionalities.cu includes //
// all the CUDA functions //
//______________________________________________________________//
#include "functionalities.h"
//======================================= Functions for the inpainting ==============================================
__global__ void global_reverse_sign( float *Image, int n )
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if( ind < n )
{
Image[ind] = -Image[ind];
}
}
__global__ void global_vorticity( float *imgU, float *imgV, float *imgVorticity, int *imgDomain, int w, int h, int nc, int n, int FullImage )
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
float dVdx, dUdy;
ch = (int)(ind) / (int)(w*h);
//ch = 0;
y = ( ind - ch*w*h ) / w;
x = ( ind - ch*w*h ) % w;
int indDomain = x + w*y;
if( ind < n )
{
if ((FullImage == 0) && (x-1>0) && (x+1<w) && (y-1>0) && (y+1<h) && (imgDomain[indDomain] == 2))
{
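/* Boundary handling (labels assumed from the detect_domain kernels below:
 * FLUID == 1, INFLOW == 2): this pixel lies on the inflow ring, so the
 * stencil centre is walked one step at a time away from any FLUID
 * neighbour, diagonals included, until the 3x3 derivative stencil below no
 * longer touches unknown (fluid) pixels. */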
while ( (imgDomain[x+1 + y*w] == 1) || (imgDomain[x-1 + y*w] == 1) || (imgDomain[x + (y+1)*w] == 1) || (imgDomain[x + (y-1)*w] == 1) || ((imgDomain[x+1 + (y+1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0)) || ((imgDomain[x-1 + (y-1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0)) || ((imgDomain[x+1 + (y-1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0)) || ((imgDomain[x-1 + (y+1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0)) )
{
if (imgDomain[x+1 + y*w] == 1) x = x - 1;
if (imgDomain[x-1 + y*w] == 1) x = x + 1;
if (imgDomain[x + (y+1)*w] == 1) y = y - 1;
if (imgDomain[x + (y-1)*w] == 1) y = y + 1;
if ((imgDomain[x+1 + (y+1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0))
{
x = x - 1;
y = y - 1;
}
if ((imgDomain[x-1 + (y-1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0))
{
x = x + 1;
y = y + 1;
}
if ((imgDomain[x+1 + (y-1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0))
{
x = x - 1;
y = y + 1;
}
if ((imgDomain[x-1 + (y+1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0))
{
x = x + 1;
y = y - 1;
}
}
}
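/* 3-10-3 smoothed central differences (a Scharr-type stencil), scaled by 1/32:
 *   d/dx: [ -3 0 3 ; -10 0 10 ; -3 0 3 ] / 32
 *   d/dy: [ -3 -10 -3 ; 0 0 0 ; 3 10 3 ] / 32
 * evaluated with clamped (edge-replicated) indexing. */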
dVdx = (1./32.)*(3*imgV[max(min(w-1, x+1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 10*imgV[max(min(w-1, x+1), 0) + w*max(min(h-1,y),0) + ch*w*h] + 3*imgV[max(min(w-1, x+1), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 3*imgV[max(min(w-1, x-1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] - 10*imgV[max(min(w-1, x-1), 0) + w*max(min(h-1,y),0) + ch*w*h] - 3*imgV[max(min(w-1, x-1), 0) + w*max(min(h-1,y-1),0) + ch*w*h]);
dUdy = (1./32.)*(3*imgU[max(min(w-1, x+1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 10*imgU[max(min(w-1, x), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 3*imgU[max(min(w-1, x-1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] - 3*imgU[max(min(w-1, x+1), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 10*imgU[max(min(w-1, x), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 3*imgU[max(min(w-1, x-1), 0) + w*max(min(h-1,y-1),0) + ch*w*h]);
imgVorticity[ind] = ( dVdx - dUdy );
}
}
__global__ void global_solve_Poisson (float *imgOut, float *imgIn, float *initVorticity, float *rhs, int *imgDomain, int w, int h, int nc, int n, float sor_theta, int redOrBlack)
{
float dh = 0.5;
float f;
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
//ch = 0;
x = ( ind - ch*w*h ) % w;
y = ( ind - ch*w*h ) / w;
int indDomain = x + w*y;
if ( ind<n )
{
bool isActive = ((x<w && y<h) && (((x+y)%2)==redOrBlack));
//bool isActive = (x<w && y<h); //&& (((x+y)%2)==redOrBlack));
// if ( (isActive) && ( (imgDomain[x + (size_t)w*y] == 1) || (imgDomain[x + (size_t)w*y] == 2) ) )
if ( (isActive) && (imgDomain[x + (size_t)w*y] == 1) )
{
float u0 = imgIn[ind];
float upx = (x+1<w? imgIn[x+1 + (size_t)w*(y ) + w*h*ch] : u0);
float umx = (x-1>=0? imgIn[x-1 + (size_t)w*(y ) + w*h*ch] : u0);
float upy = (y+1<h? imgIn[x + (size_t)w*(y+1) + w*h*ch] : u0);
float umy = (y-1>=0? imgIn[x + (size_t)w*(y-1) + w*h*ch] : u0);
//if (imgDomain[ind] == 1)
//{
if ((imgDomain[indDomain+1] == 1) && (imgDomain[indDomain-1] == 1) && (imgDomain[indDomain+w] == 1) && (imgDomain[indDomain-w] == 1))
{
f = dh*dh*rhs[ind];
}
else
{
f = dh*dh*initVorticity[ind];
//f = -dh*dh*rhs[ind];
}
//}
//else
//{
//f = 0.0f;
//}
float val = -( f - (upx + umx + upy + umy) ) / 4.0;
//float val = ((upx + umx + upy + umy) ) / 4.0;
val = sor_theta*val + (1.0-sor_theta)*u0;
imgOut[ind] = val;
}
}
}
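/* The kernel above updates only one colour class per launch, so one full
 * Gauss-Seidel SOR sweep needs two launches. The helper below is an
 * illustrative sketch, not part of the original interface: buffer names,
 * grid/block shapes and the in-place update (safe for red/black, since red
 * cells read only black neighbours and vice versa) are assumptions. */
void sketch_poisson_sweeps( float *gpu_img, float *gpu_initVorticity, float *gpu_rhs, int *gpu_Domain, int w, int h, int nc, int n, float sor_theta, int sweeps, dim3 grid, dim3 block )
{
	for (int it = 0; it < sweeps; it++)
	{
		// update the "red" cells, (x+y) % 2 == 0
		hipLaunchKernelGGL(( global_solve_Poisson) , dim3(grid),dim3(block), 0, 0, gpu_img, gpu_img, gpu_initVorticity, gpu_rhs, gpu_Domain, w, h, nc, n, sor_theta, 0);
		// then the "black" cells, (x+y) % 2 == 1, which see the fresh red values
		hipLaunchKernelGGL(( global_solve_Poisson) , dim3(grid),dim3(block), 0, 0, gpu_img, gpu_img, gpu_initVorticity, gpu_rhs, gpu_Domain, w, h, nc, n, sor_theta, 1);
	}
}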
__global__ void global_grad( float *imgIn, int *imgDomain, float *v1, float *v2, int w, int h, int nc, int n, int FullImage )
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
//ch = 0;
y = ( ind - ch*w*h ) / w;
x = ( ind - ch*w*h ) % w;
int indDomain = x + w*y;
if( ind < n )
{
if ((FullImage == 0) && (x-1>0) && (x+1<w) && (y-1>0) && (y+1<h) && (imgDomain[indDomain] == 2))
{
while ( (imgDomain[x+1 + y*w] == 1) || (imgDomain[x-1 + y*w] == 1) || (imgDomain[x + (y+1)*w] == 1) || (imgDomain[x + (y-1)*w] == 1) || ((imgDomain[x+1 + (y+1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0)) || ((imgDomain[x-1 + (y-1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0)) || ((imgDomain[x+1 + (y-1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0)) || ((imgDomain[x-1 + (y+1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0)) )
{
if (imgDomain[x+1 + y*w] == 1) x = x - 1;
if (imgDomain[x-1 + y*w] == 1) x = x + 1;
if (imgDomain[x + (y+1)*w] == 1) y = y - 1;
if (imgDomain[x + (y-1)*w] == 1) y = y + 1;
if ((imgDomain[x+1 + (y+1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0))
{
x = x - 1;
y = y - 1;
}
if ((imgDomain[x-1 + (y-1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0))
{
x = x + 1;
y = y + 1;
}
if ((imgDomain[x+1 + (y-1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0))
{
x = x - 1;
y = y + 1;
}
if ((imgDomain[x-1 + (y+1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0))
{
x = x + 1;
y = y - 1;
}
}
}
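/* same 3-10-3 Scharr-type derivative stencil as in global_vorticity above */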
v1[ind] = (1./32.)*(3*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 10*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y),0) + ch*w*h] + 3*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 3*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] - 10*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y),0) + ch*w*h] - 3*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y-1),0) + ch*w*h]);
v2[ind] = (1./32.)*(3*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 10*imgIn[max(min(w-1, x), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 3*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] - 3*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 10*imgIn[max(min(w-1, x), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 3*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y-1),0) + ch*w*h]);
}
}
/*
__global__ void global_norm( float *imgIn, float *imgOut, int w, int h, int n )
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
float temp;
if( ind < n )
{
imgOut[ind] = imgIn[ind]*imgIn[ind];
//imgOut[ind] += imgIn[ind+w*h]*imgIn[ind+w*h];
//imgOut[ind] += imgIn[ind+2*w*h]*imgIn[ind+2*w*h];
imgOut[ind] = sqrtf(imgOut[ind]);
}
}
*/
/*
__device__ int check_color( float *c, float r, float g, float b )
{
float eps = 0.0001;
// Color transformation
// source : http://linuxtv.org/downloads/v4l-dvb-apis/colorspaces.html
// float clamp (double x)
// {
// float r = x; // round to nearest
//
// if (r < 0.0) return 0.0f;
// else if (r > 1.0) return 1.0f;
// else return r;
// }
if( ( fabsf( r-c[0] ) < eps) && ( fabsf( g-c[1] ) < eps) && ( fabsf( b-c[2] ) < eps ) ) return 1;
else return 0;
}
*/
/*
* __global__ void global_detect_domain( float *imgIn, int *imgDomain, int w, int h, int n )
* {
* float c[3] = {1.0f, 0.0f, 0.0f};
* // For looping around a pixel
* int neighbour[8]={ 1, -1, w, -w, -w-1, -w+1, w-1, w+1 };
* int ind = threadIdx.x + blockDim.x * blockIdx.x;
*
* if( ind < n )
* {
* if( check_color( c, imgIn[ind], imgIn[ind+w*h], imgIn[ind+2*w*h] ) )
* {
* imgDomain[ind] = FLUID;
* for( int i = 0; i < 8; i++ )
* {
* //TODO: Check if ind+neighbour[i] is in the domain!
* if( check_color( c, imgIn[ind+neighbour[i]], imgIn[ind+w*h+neighbour[i]], imgIn[ind+2*w*h+neighbour[i]] ) != 1 )
* {
* imgDomain[ind+neighbour[i]] = INFLOW;
}
}
}
else imgDomain[ind] = OBSTACLE;
}
}
*/
__global__ void global_detect_domain1( float *imgMask, int *imgDomain, int w, int h, int n )
{
float c = 1.0;
// For looping around a pixel
int neighbour[8]={ 1, -1, w, -w, -w-1, -w+1, w-1, w+1 };
int ind = threadIdx.x + blockDim.x * blockIdx.x;
float eps = 0.0001;
if( ind < n )
{
if ( fabsf( imgMask[ind]-c ) < eps )
{
imgDomain[ind] = FLUID;
for( int i = 0; i < 8; i++ )
{
//TODO: Check if ind+neighbour[i] is in the domain!
if ( fabsf( imgMask[ind+neighbour[i]]-c ) > eps )
{
imgDomain[ind+neighbour[i]] = INFLOW;
}
}
}
else imgDomain[ind] = OBSTACLE;
}
}
__global__ void global_detect_domain2( float *imgMask, int *imgDomain, int w, int h, int n )
{
float c = 1.0;
// For looping around a pixel
int neighbour[8]={ 1, -1, w, -w, -w-1, -w+1, w-1, w+1 };
int ind = threadIdx.x + blockDim.x * blockIdx.x;
// Try to enlarge the domain by one pixel - there is always some strange interpolation happening, so the masks are not perfect
if( ind < n )
{
if ( imgDomain[ind] == INFLOW ) imgDomain[ind] = FLUID;
}
}
__global__ void global_detect_domain3( float *imgMask, int *imgDomain, int w, int h, int n )
{
float c = 1.0;
// For looping around a pixel
int neighbour[8]={ 1, -1, w, -w, -w-1, -w+1, w-1, w+1 };
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if( ind < n )
{
if (imgDomain[ind] == FLUID)
{
for( int i = 0; i < 8; i++ )
{
//TODO: Check if ind+neighbour[i] is in the domain!
if (imgDomain[ind+neighbour[i]] == OBSTACLE)
{
imgDomain[ind+neighbour[i]] = INFLOW;
}
}
}
}
}
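/* The three kernels above run as one pipeline per mask: pass 1 labels FLUID
 * cells from the mask and rings them with INFLOW, pass 2 promotes that ring
 * to FLUID (growing the region by one pixel to absorb the interpolation
 * artefacts mentioned above), and pass 3 rebuilds a clean one-pixel INFLOW
 * ring around the grown region. Illustrative host-side call order (a sketch;
 * grid/block shapes are assumptions):
 *
 *   hipLaunchKernelGGL(( global_detect_domain1) , dim3(grid),dim3(block), 0, 0, gpu_Mask, gpu_Domain, w, h, w*h );
 *   hipLaunchKernelGGL(( global_detect_domain2) , dim3(grid),dim3(block), 0, 0, gpu_Mask, gpu_Domain, w, h, w*h );
 *   hipLaunchKernelGGL(( global_detect_domain3) , dim3(grid),dim3(block), 0, 0, gpu_Mask, gpu_Domain, w, h, w*h );
 */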
//======================================= Functions for anisotropic diffusion ==============================================
__global__ void update_aniso_diff(float *imgIn, float *divergence, int *imgDomain, float *imgOut, float timestep, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if (ind<n)
{
if (imgDomain[x + (size_t)w*y] == 1)
{
imgOut[ind] = imgIn[ind] + timestep*divergence[ind];
}
else
{
imgOut[ind] = imgIn[ind];
}
}
}
__host__ __device__ float g_dash(float s)
{
float eps = 0.01;
//return 1.0f; // constant diffusivity: plain linear (heat-equation) diffusion
return (1.0f/max(eps, s)); // 1/|grad u|, clamped by eps: total-variation-type diffusivity
//return expf(-s*s/eps)/eps; // exponential (Perona-Malik-style) edge-stopping alternative
}
__global__ void global_diffusivity(float *v1, float *v2, float *diffusivity, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if (ind<n)
{
diffusivity[ind] = g_dash(sqrtf( v1[ind]*v1[ind] + v2[ind]*v2[ind]));
}
}
__global__ void mult_scal_vec(float *scal_field, float *vec_field, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if (ind<n)
{
vec_field[ind] = scal_field[ind]*vec_field[ind];
//vec_field[ind+w*h] = scal_field[ind]*vec_field[ind+w*h];
//vec_field[ind+2*w*h] = scal_field[ind]*vec_field[ind+2*w*h];
}
}
__device__ float aniso_cuda_diff_x(float a, float b, int x, int w)
{
if (x+1<w)
{
return (a - b);
}
else
{
return 0.0f;
}
}
__device__ float aniso_cuda_diff_y(float a, float b, int y, int h)
{
if (y+1<h)
{
return (a - b);
}
else
{
return 0.0f;
}
}
__global__ void aniso_global_grad(float *imgIn, float *v1, float *v2, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if (ind<n)
{
/* guard the +1/+w loads: arguments are evaluated before the bounds check
 * inside aniso_cuda_diff_*, so clamp the reads to stay inside the array */
v1[ind] = aniso_cuda_diff_x(x+1<w ? imgIn[ind+1] : imgIn[ind], imgIn[ind], x, w);
v2[ind] = aniso_cuda_diff_y(y+1<h ? imgIn[ind+w] : imgIn[ind], imgIn[ind], y, h);
}
}
__device__ float aniso_cuda_div_x(float a, float b, int x, int w)
{
if ((x+1<w) && (x>0))
{
return (a - b);
}
else if (x+1<w)
{
return (a - 0);
}
else if (x>0)
{
return (0 - b);
}
else
{
return 0.;
}
}
__device__ float aniso_cuda_div_y(float a, float b, int y, int h)
{
if ((y+1<h) && (y>0))
{
return (a - b);
}
else if (y+1<h)
{
return (a - 0);
}
else if (y>0)
{
return (0 - b);
}
else
{
return 0.;
}
}
__global__ void aniso_global_div(float *v1, float *v2, float *imgOut, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if ((ind<n) && (ind-w>=0) && (ind-1>=0))
{
imgOut[ind] = aniso_cuda_div_x(v1[ind], v1[ind-1], x, w) + aniso_cuda_div_y(v2[ind], v2[ind-w], y, h);
}
}
__global__ void aniso_global_norm(float *imgIn, float *imgOut, int w, int h, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if (ind<n)
{
imgOut[ind] = imgIn[ind]*imgIn[ind];
//imgOut[ind] += imgIn[ind+w*h]*imgIn[ind+w*h];
//imgOut[ind] += imgIn[ind+2*w*h]*imgIn[ind+2*w*h];
imgOut[ind] = sqrtf(imgOut[ind]);
}
}
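/* The host driver below chains these kernels into one explicit diffusion step,
 * u <- u + tau * div( g(|grad u|) * grad u ):
 * gradient (aniso_global_grad) -> diffusivity g (global_diffusivity) ->
 * scaled flux (mult_scal_vec) -> divergence (aniso_global_div) ->
 * explicit update restricted to the inpainting domain (update_aniso_diff). */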
void aniso_diff(float *imgIn, int *imgDomain, float *imgOut, int w, int h, int nc, float tau, int N, dim3 grid, dim3 block)
{
float *v1 = new float[(size_t)w*h*nc];
float *v2 = new float[(size_t)w*h*nc];
float *divergence = new float[(size_t)w*h*nc];
float *diffusivity = new float[(size_t)w*h];
float *imgOutGray = new float[(size_t)w*h];
int n = w*h*nc;
for (int i=0; i<N; i++)
{
// allocate GPU memory
float *gpu_In, *gpu_v1, *gpu_v2, *gpu_Out, *gpu_Out_Gray;
int *gpu_Domain;
hipMalloc(&gpu_In, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_v1, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_v2, n*sizeof(float));
CUDA_CHECK;
// copy host memory to device
hipMemcpy(gpu_In, imgIn, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipMemcpy(gpu_v1, v1, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipMemcpy(gpu_v2, v2, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
// launch kernel
//dim3 block = dim3(128,1,1);
//dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
hipLaunchKernelGGL(( aniso_global_grad) , dim3(grid),dim3(block), 0, 0, gpu_In, gpu_v1, gpu_v2, w, h, nc, n);
// copy result back to host (CPU) memory
hipMemcpy(v1, gpu_v1, n * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
hipMemcpy(v2, gpu_v2, n * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
// free device (GPU) memory
hipFree(gpu_In);
CUDA_CHECK;
hipFree(gpu_v1);
CUDA_CHECK;
hipFree(gpu_v2);
CUDA_CHECK;
// Calculate diffusivity and multiply by gradient
// allocate GPU memory
float *gpu_diffusivity;
hipMalloc(&gpu_v1, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_v2, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_diffusivity, w*h*sizeof(float));
CUDA_CHECK;
// copy host memory to device
hipMemcpy(gpu_v1, v1, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipMemcpy(gpu_v2, v2, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipMemcpy(gpu_diffusivity, diffusivity, w*h*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
// launch kernel
hipLaunchKernelGGL(( global_diffusivity) , dim3(grid),dim3(block), 0, 0, gpu_v1, gpu_v2, gpu_diffusivity, w, h, nc, w*h);
hipLaunchKernelGGL(( mult_scal_vec) , dim3(grid),dim3(block), 0, 0, gpu_diffusivity, gpu_v1, w, h, nc, w*h);
hipLaunchKernelGGL(( mult_scal_vec) , dim3(grid),dim3(block), 0, 0, gpu_diffusivity, gpu_v2, w, h, nc, w*h);
// copy result back to host (CPU) memory
hipMemcpy(diffusivity, gpu_diffusivity, w*h * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
// copy result back to host (CPU) memory
hipMemcpy(v1, gpu_v1, n * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
hipMemcpy(v2, gpu_v2, n * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
// free device (GPU) memory
hipFree(gpu_diffusivity);
CUDA_CHECK;
hipFree(gpu_v1);
CUDA_CHECK;
hipFree(gpu_v2);
CUDA_CHECK;
// Calculate divergence of a gradient
// allocate GPU memory
float *gpu_divergence;
hipMalloc(&gpu_v1, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_v2, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_divergence, n*sizeof(float));
CUDA_CHECK;
// copy host memory to device
hipMemcpy(gpu_v1, v1, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipMemcpy(gpu_v2, v2, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipMemcpy(gpu_divergence, divergence, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
// launch kernel
//dim3 block = dim3(128,1,1);
//dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
hipLaunchKernelGGL(( aniso_global_div) , dim3(grid),dim3(block), 0, 0, gpu_v1, gpu_v2, gpu_divergence, w, h, nc, n);
// copy result back to host (CPU) memory
hipMemcpy(divergence, gpu_divergence, n * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
// free device (GPU) memory
hipFree(gpu_v1);
CUDA_CHECK;
hipFree(gpu_v2);
CUDA_CHECK;
hipFree(gpu_divergence);
CUDA_CHECK;
// Update image
// allocate GPU memory
//float *gpu_In, *gpu_Out_Gray;
hipMalloc(&gpu_In, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_Out, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_divergence, n*sizeof(float));
CUDA_CHECK;
hipMalloc(&gpu_Domain, n*sizeof(int));
CUDA_CHECK;
// copy host memory to device
hipMemcpy(gpu_In, imgIn, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipMemcpy(gpu_divergence, divergence, n*sizeof(float), hipMemcpyHostToDevice);
CUDA_CHECK;
hipMemcpy(gpu_Domain, imgDomain, n*sizeof(int), hipMemcpyHostToDevice);
CUDA_CHECK;
// launch kernel
hipLaunchKernelGGL(( update_aniso_diff) , dim3(grid),dim3(block), 0, 0, gpu_In, gpu_divergence, gpu_Domain, gpu_Out, tau, w, h, nc, n);
// copy result back to host (CPU) memory
hipMemcpy(imgOut, gpu_Out, n * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
hipMemcpy(imgIn, gpu_Out, n * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
// free device (GPU) memory
hipFree(gpu_In);
CUDA_CHECK;
hipFree(gpu_divergence);
CUDA_CHECK;
hipFree(gpu_Domain);
CUDA_CHECK;
hipFree(gpu_Out);
CUDA_CHECK;
}
delete[] v1;
delete[] v2;
delete[] diffusivity;
delete[] divergence;
delete[] imgOutGray;
}
| 956ac7d48812c2586a956455498f971c4864fe6c.cu | //______________________________________________________________//
// functionalities.cu includes //
// all the CUDA functions //
//______________________________________________________________//
#include "functionalities.h"
//======================================= Functions for the inpainting ==============================================
__global__ void global_reverse_sign( float *Image, int n )
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if( ind < n )
{
Image[ind] = -Image[ind];
}
}
__global__ void global_vorticity( float *imgU, float *imgV, float *imgVorticity, int *imgDomain, int w, int h, int nc, int n, int FullImage )
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
float dVdx, dUdy;
ch = (int)(ind) / (int)(w*h);
//ch = 0;
y = ( ind - ch*w*h ) / w;
x = ( ind - ch*w*h ) % w;
int indDomain = x + w*y;
if( ind < n )
{
if ((FullImage == 0) && (x-1>0) && (x+1<w) && (y-1>0) && (y+1<h) && (imgDomain[indDomain] == 2))
{
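/* Boundary handling (labels assumed from the detect_domain kernels below:
 * FLUID == 1, INFLOW == 2): this pixel lies on the inflow ring, so the
 * stencil centre is walked one step at a time away from any FLUID
 * neighbour, diagonals included, until the 3x3 derivative stencil below no
 * longer touches unknown (fluid) pixels. */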
while ( (imgDomain[x+1 + y*w] == 1) || (imgDomain[x-1 + y*w] == 1) || (imgDomain[x + (y+1)*w] == 1) || (imgDomain[x + (y-1)*w] == 1) || ((imgDomain[x+1 + (y+1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0)) || ((imgDomain[x-1 + (y-1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0)) || ((imgDomain[x+1 + (y-1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0)) || ((imgDomain[x-1 + (y+1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0)) )
{
if (imgDomain[x+1 + y*w] == 1) x = x - 1;
if (imgDomain[x-1 + y*w] == 1) x = x + 1;
if (imgDomain[x + (y+1)*w] == 1) y = y - 1;
if (imgDomain[x + (y-1)*w] == 1) y = y + 1;
if ((imgDomain[x+1 + (y+1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0))
{
x = x - 1;
y = y - 1;
}
if ((imgDomain[x-1 + (y-1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0))
{
x = x + 1;
y = y + 1;
}
if ((imgDomain[x+1 + (y-1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0))
{
x = x - 1;
y = y + 1;
}
if ((imgDomain[x-1 + (y+1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0))
{
x = x + 1;
y = y - 1;
}
}
}
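/* 3-10-3 smoothed central differences (a Scharr-type stencil), scaled by 1/32:
 *   d/dx: [ -3 0 3 ; -10 0 10 ; -3 0 3 ] / 32
 *   d/dy: [ -3 -10 -3 ; 0 0 0 ; 3 10 3 ] / 32
 * evaluated with clamped (edge-replicated) indexing. */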
dVdx = (1./32.)*(3*imgV[max(min(w-1, x+1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 10*imgV[max(min(w-1, x+1), 0) + w*max(min(h-1,y),0) + ch*w*h] + 3*imgV[max(min(w-1, x+1), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 3*imgV[max(min(w-1, x-1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] - 10*imgV[max(min(w-1, x-1), 0) + w*max(min(h-1,y),0) + ch*w*h] - 3*imgV[max(min(w-1, x-1), 0) + w*max(min(h-1,y-1),0) + ch*w*h]);
dUdy = (1./32.)*(3*imgU[max(min(w-1, x+1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 10*imgU[max(min(w-1, x), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 3*imgU[max(min(w-1, x-1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] - 3*imgU[max(min(w-1, x+1), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 10*imgU[max(min(w-1, x), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 3*imgU[max(min(w-1, x-1), 0) + w*max(min(h-1,y-1),0) + ch*w*h]);
imgVorticity[ind] = ( dVdx - dUdy );
}
}
__global__ void global_solve_Poisson (float *imgOut, float *imgIn, float *initVorticity, float *rhs, int *imgDomain, int w, int h, int nc, int n, float sor_theta, int redOrBlack)
{
float dh = 0.5;
float f;
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
//ch = 0;
x = ( ind - ch*w*h ) % w;
y = ( ind - ch*w*h ) / w;
int indDomain = x + w*y;
if ( ind<n )
{
bool isActive = ((x<w && y<h) && (((x+y)%2)==redOrBlack));
//bool isActive = (x<w && y<h); //&& (((x+y)%2)==redOrBlack));
// if ( (isActive) && ( (imgDomain[x + (size_t)w*y] == 1) || (imgDomain[x + (size_t)w*y] == 2) ) )
if ( (isActive) && (imgDomain[x + (size_t)w*y] == 1) )
{
float u0 = imgIn[ind];
float upx = (x+1<w? imgIn[x+1 + (size_t)w*(y ) + w*h*ch] : u0);
float umx = (x-1>=0? imgIn[x-1 + (size_t)w*(y ) + w*h*ch] : u0);
float upy = (y+1<h? imgIn[x + (size_t)w*(y+1) + w*h*ch] : u0);
float umy = (y-1>=0? imgIn[x + (size_t)w*(y-1) + w*h*ch] : u0);
//if (imgDomain[ind] == 1)
//{
if ((imgDomain[indDomain+1] == 1) && (imgDomain[indDomain-1] == 1) && (imgDomain[indDomain+w] == 1) && (imgDomain[indDomain-w] == 1))
{
f = dh*dh*rhs[ind];
}
else
{
f = dh*dh*initVorticity[ind];
//f = -dh*dh*rhs[ind];
}
//}
//else
//{
//f = 0.0f;
//}
float val = -( f - (upx + umx + upy + umy) ) / 4.0;
//float val = ((upx + umx + upy + umy) ) / 4.0;
val = sor_theta*val + (1.0-sor_theta)*u0;
imgOut[ind] = val;
}
}
}
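/* The kernel above updates only one colour class per launch, so one full
 * Gauss-Seidel SOR sweep needs two launches. The helper below is an
 * illustrative sketch, not part of the original interface: buffer names,
 * grid/block shapes and the in-place update (safe for red/black, since red
 * cells read only black neighbours and vice versa) are assumptions. */
void sketch_poisson_sweeps( float *gpu_img, float *gpu_initVorticity, float *gpu_rhs, int *gpu_Domain, int w, int h, int nc, int n, float sor_theta, int sweeps, dim3 grid, dim3 block )
{
	for (int it = 0; it < sweeps; it++)
	{
		// update the "red" cells, (x+y) % 2 == 0
		global_solve_Poisson <<<grid,block>>> (gpu_img, gpu_img, gpu_initVorticity, gpu_rhs, gpu_Domain, w, h, nc, n, sor_theta, 0);
		// then the "black" cells, (x+y) % 2 == 1, which see the fresh red values
		global_solve_Poisson <<<grid,block>>> (gpu_img, gpu_img, gpu_initVorticity, gpu_rhs, gpu_Domain, w, h, nc, n, sor_theta, 1);
	}
}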
__global__ void global_grad( float *imgIn, int *imgDomain, float *v1, float *v2, int w, int h, int nc, int n, int FullImage )
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
//ch = 0;
y = ( ind - ch*w*h ) / w;
x = ( ind - ch*w*h ) % w;
int indDomain = x + w*y;
if( ind < n )
{
if ((FullImage == 0) && (x-1>0) && (x+1<w) && (y-1>0) && (y+1<h) && (imgDomain[indDomain] == 2))
{
while ( (imgDomain[x+1 + y*w] == 1) || (imgDomain[x-1 + y*w] == 1) || (imgDomain[x + (y+1)*w] == 1) || (imgDomain[x + (y-1)*w] == 1) || ((imgDomain[x+1 + (y+1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0)) || ((imgDomain[x-1 + (y-1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0)) || ((imgDomain[x+1 + (y-1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0)) || ((imgDomain[x-1 + (y+1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0)) )
{
if (imgDomain[x+1 + y*w] == 1) x = x - 1;
if (imgDomain[x-1 + y*w] == 1) x = x + 1;
if (imgDomain[x + (y+1)*w] == 1) y = y - 1;
if (imgDomain[x + (y-1)*w] == 1) y = y + 1;
if ((imgDomain[x+1 + (y+1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0))
{
x = x - 1;
y = y - 1;
}
if ((imgDomain[x-1 + (y-1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0))
{
x = x + 1;
y = y + 1;
}
if ((imgDomain[x+1 + (y-1)*w] == 1) && (imgDomain[x+1 + y*w] == 0) && (imgDomain[x + (y-1)*w] == 0))
{
x = x - 1;
y = y + 1;
}
if ((imgDomain[x-1 + (y+1)*w] == 1) && (imgDomain[x-1 + y*w] == 0) && (imgDomain[x + (y+1)*w] == 0))
{
x = x + 1;
y = y - 1;
}
}
}
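/* same 3-10-3 Scharr-type derivative stencil as in global_vorticity above */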
v1[ind] = (1./32.)*(3*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 10*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y),0) + ch*w*h] + 3*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 3*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] - 10*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y),0) + ch*w*h] - 3*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y-1),0) + ch*w*h]);
v2[ind] = (1./32.)*(3*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 10*imgIn[max(min(w-1, x), 0) + w*max(min(h-1,y+1),0) + ch*w*h] + 3*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y+1),0) + ch*w*h] - 3*imgIn[max(min(w-1, x+1), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 10*imgIn[max(min(w-1, x), 0) + w*max(min(h-1,y-1),0) + ch*w*h] - 3*imgIn[max(min(w-1, x-1), 0) + w*max(min(h-1,y-1),0) + ch*w*h]);
}
}
/*
__global__ void global_norm( float *imgIn, float *imgOut, int w, int h, int n )
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
float temp;
if( ind < n )
{
imgOut[ind] = imgIn[ind]*imgIn[ind];
//imgOut[ind] += imgIn[ind+w*h]*imgIn[ind+w*h];
//imgOut[ind] += imgIn[ind+2*w*h]*imgIn[ind+2*w*h];
imgOut[ind] = sqrtf(imgOut[ind]);
}
}
*/
/*
__device__ int check_color( float *c, float r, float g, float b )
{
float eps = 0.0001;
// Color transformation
// source : http://linuxtv.org/downloads/v4l-dvb-apis/colorspaces.html
// float clamp (double x)
// {
// float r = x; // round to nearest
//
// if (r < 0.0) return 0.0f;
// else if (r > 1.0) return 1.0f;
// else return r;
// }
if( ( fabsf( r-c[0] ) < eps) && ( fabsf( g-c[1] ) < eps) && ( fabsf( b-c[2] ) < eps ) ) return 1;
else return 0;
}
*/
/*
* __global__ void global_detect_domain( float *imgIn, int *imgDomain, int w, int h, int n )
* {
* float c[3] = {1.0f, 0.0f, 0.0f};
* // For looping around a pixel
* int neighbour[8]={ 1, -1, w, -w, -w-1, -w+1, w-1, w+1 };
* int ind = threadIdx.x + blockDim.x * blockIdx.x;
*
* if( ind < n )
* {
* if( check_color( c, imgIn[ind], imgIn[ind+w*h], imgIn[ind+2*w*h] ) )
* {
* imgDomain[ind] = FLUID;
* for( int i = 0; i < 8; i++ )
* {
* //TODO: Check if ind+neighbour[i] is in the domain!
* if( check_color( c, imgIn[ind+neighbour[i]], imgIn[ind+w*h+neighbour[i]], imgIn[ind+2*w*h+neighbour[i]] ) != 1 )
* {
* imgDomain[ind+neighbour[i]] = INFLOW;
}
}
}
else imgDomain[ind] = OBSTACLE;
}
}
*/
__global__ void global_detect_domain1( float *imgMask, int *imgDomain, int w, int h, int n )
{
float c = 1.0;
// For looping around a pixel
int neighbour[8]={ 1, -1, w, -w, -w-1, -w+1, w-1, w+1 };
int ind = threadIdx.x + blockDim.x * blockIdx.x;
float eps = 0.0001;
if( ind < n )
{
if ( fabsf( imgMask[ind]-c ) < eps )
{
imgDomain[ind] = FLUID;
for( int i = 0; i < 8; i++ )
{
//TODO: Check if ind+neighbour[i] is in the domain!
if ( fabsf( imgMask[ind+neighbour[i]]-c ) > eps )
{
imgDomain[ind+neighbour[i]] = INFLOW;
}
}
}
else imgDomain[ind] = OBSTACLE;
}
}
__global__ void global_detect_domain2( float *imgMask, int *imgDomain, int w, int h, int n )
{
float c = 1.0;
// For looping around a pixel
int neighbour[8]={ 1, -1, w, -w, -w-1, -w+1, w-1, w+1 };
int ind = threadIdx.x + blockDim.x * blockIdx.x;
// Try to enlarge the domain by one pixel - there is always some strange interpolation happening, so the masks are not perfect
if( ind < n )
{
if ( imgDomain[ind] == INFLOW ) imgDomain[ind] = FLUID;
}
}
__global__ void global_detect_domain3( float *imgMask, int *imgDomain, int w, int h, int n )
{
float c = 1.0;
// For looping around a pixel
int neighbour[8]={ 1, -1, w, -w, -w-1, -w+1, w-1, w+1 };
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if( ind < n )
{
if (imgDomain[ind] == FLUID)
{
for( int i = 0; i < 8; i++ )
{
//TODO: Check if ind+neighbour[i] is in the domain!
if (imgDomain[ind+neighbour[i]] == OBSTACLE)
{
imgDomain[ind+neighbour[i]] = INFLOW;
}
}
}
}
}
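/* The three kernels above run as one pipeline per mask: pass 1 labels FLUID
 * cells from the mask and rings them with INFLOW, pass 2 promotes that ring
 * to FLUID (growing the region by one pixel to absorb the interpolation
 * artefacts mentioned above), and pass 3 rebuilds a clean one-pixel INFLOW
 * ring around the grown region. Illustrative host-side call order (a sketch;
 * grid/block shapes are assumptions):
 *
 *   global_detect_domain1 <<<grid,block>>> ( gpu_Mask, gpu_Domain, w, h, w*h );
 *   global_detect_domain2 <<<grid,block>>> ( gpu_Mask, gpu_Domain, w, h, w*h );
 *   global_detect_domain3 <<<grid,block>>> ( gpu_Mask, gpu_Domain, w, h, w*h );
 */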
//======================================= Functions for anisotropic diffusion ==============================================
__global__ void update_aniso_diff(float *imgIn, float *divergence, int *imgDomain, float *imgOut, float timestep, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if (ind<n)
{
if (imgDomain[x + (size_t)w*y] == 1)
{
imgOut[ind] = imgIn[ind] + timestep*divergence[ind];
}
else
{
imgOut[ind] = imgIn[ind];
}
}
}
__host__ __device__ float g_dash(float s)
{
float eps = 0.01;
//return 1.0f; // constant diffusivity: plain linear (heat-equation) diffusion
return (1.0f/max(eps, s)); // 1/|grad u|, clamped by eps: total-variation-type diffusivity
//return expf(-s*s/eps)/eps; // exponential (Perona-Malik-style) edge-stopping alternative
}
__global__ void global_diffusivity(float *v1, float *v2, float *diffusivity, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if (ind<n)
{
diffusivity[ind] = g_dash(sqrtf( v1[ind]*v1[ind] + v2[ind]*v2[ind]));
}
}
__global__ void mult_scal_vec(float *scal_field, float *vec_field, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if (ind<n)
{
vec_field[ind] = scal_field[ind]*vec_field[ind];
//vec_field[ind+w*h] = scal_field[ind]*vec_field[ind+w*h];
//vec_field[ind+2*w*h] = scal_field[ind]*vec_field[ind+2*w*h];
}
}
__device__ float aniso_cuda_diff_x(float a, float b, int x, int w)
{
if (x+1<w)
{
return (a - b);
}
else
{
return 0.0f;
}
}
__device__ float aniso_cuda_diff_y(float a, float b, int y, int h)
{
if (y+1<h)
{
return (a - b);
}
else
{
return 0.0f;
}
}
__global__ void aniso_global_grad(float *imgIn, float *v1, float *v2, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if (ind<n)
{
/* guard the +1/+w loads: arguments are evaluated before the bounds check
 * inside aniso_cuda_diff_*, so clamp the reads to stay inside the array */
v1[ind] = aniso_cuda_diff_x(x+1<w ? imgIn[ind+1] : imgIn[ind], imgIn[ind], x, w);
v2[ind] = aniso_cuda_diff_y(y+1<h ? imgIn[ind+w] : imgIn[ind], imgIn[ind], y, h);
}
}
__device__ float aniso_cuda_div_x(float a, float b, int x, int w)
{
if ((x+1<w) && (x>0))
{
return (a - b);
}
else if (x+1<w)
{
return (a - 0);
}
else if (x>0)
{
return (0 - b);
}
else
{
return 0.;
}
}
__device__ float aniso_cuda_div_y(float a, float b, int y, int h)
{
if ((y+1<h) && (y>0))
{
return (a - b);
}
else if (y+1<h)
{
return (a - 0);
}
else if (y>0)
{
return (0 - b);
}
else
{
return 0.;
}
}
__global__ void aniso_global_div(float *v1, float *v2, float *imgOut, int w, int h, int nc, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
int x, y, ch;
ch = (int)(ind) / (int)(w*h);
y = (ind - ch*w*h) / (int)w;
x = (ind - ch*w*h) % (int)w;
if ((ind<n) && (ind-w>=0) && (ind-1>=0))
{
imgOut[ind] = aniso_cuda_div_x(v1[ind], v1[ind-1], x, w) + aniso_cuda_div_y(v2[ind], v2[ind-w], y, h);
}
}
__global__ void aniso_global_norm(float *imgIn, float *imgOut, int w, int h, int n)
{
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if (ind<n)
{
imgOut[ind] = imgIn[ind]*imgIn[ind];
//imgOut[ind] += imgIn[ind+w*h]*imgIn[ind+w*h];
//imgOut[ind] += imgIn[ind+2*w*h]*imgIn[ind+2*w*h];
imgOut[ind] = sqrtf(imgOut[ind]);
}
}
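/* The host driver below chains these kernels into one explicit diffusion step,
 * u <- u + tau * div( g(|grad u|) * grad u ):
 * gradient (aniso_global_grad) -> diffusivity g (global_diffusivity) ->
 * scaled flux (mult_scal_vec) -> divergence (aniso_global_div) ->
 * explicit update restricted to the inpainting domain (update_aniso_diff). */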
void aniso_diff(float *imgIn, int *imgDomain, float *imgOut, int w, int h, int nc, float tau, int N, dim3 grid, dim3 block)
{
float *v1 = new float[(size_t)w*h*nc];
float *v2 = new float[(size_t)w*h*nc];
float *divergence = new float[(size_t)w*h*nc];
float *diffusivity = new float[(size_t)w*h];
float *imgOutGray = new float[(size_t)w*h];
int n = w*h*nc;
for (int i=0; i<N; i++)
{
// allocate GPU memory
float *gpu_In, *gpu_v1, *gpu_v2, *gpu_Out, *gpu_Out_Gray;
int *gpu_Domain;
cudaMalloc(&gpu_In, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_v1, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_v2, n*sizeof(float));
CUDA_CHECK;
// copy host memory to device
cudaMemcpy(gpu_In, imgIn, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
cudaMemcpy(gpu_v1, v1, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
cudaMemcpy(gpu_v2, v2, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
// launch kernel
//dim3 block = dim3(128,1,1);
//dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
aniso_global_grad <<<grid,block>>> (gpu_In, gpu_v1, gpu_v2, w, h, nc, n);
// copy result back to host (CPU) memory
cudaMemcpy(v1, gpu_v1, n * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
cudaMemcpy(v2, gpu_v2, n * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
// free device (GPU) memory
cudaFree(gpu_In);
CUDA_CHECK;
cudaFree(gpu_v1);
CUDA_CHECK;
cudaFree(gpu_v2);
CUDA_CHECK;
// Calculate diffusivity and multiply by gradient
// allocate GPU memory
float *gpu_diffusivity;
cudaMalloc(&gpu_v1, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_v2, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_diffusivity, w*h*sizeof(float));
CUDA_CHECK;
// copy host memory to device
cudaMemcpy(gpu_v1, v1, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
cudaMemcpy(gpu_v2, v2, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
cudaMemcpy(gpu_diffusivity, diffusivity, w*h*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
// launch kernel
global_diffusivity <<<grid,block>>> (gpu_v1, gpu_v2, gpu_diffusivity, w, h, nc, w*h);
mult_scal_vec <<<grid,block>>> (gpu_diffusivity, gpu_v1, w, h, nc, w*h);
mult_scal_vec <<<grid,block>>> (gpu_diffusivity, gpu_v2, w, h, nc, w*h);
// copy result back to host (CPU) memory
cudaMemcpy(diffusivity, gpu_diffusivity, w*h * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
// copy result back to host (CPU) memory
cudaMemcpy(v1, gpu_v1, n * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
cudaMemcpy(v2, gpu_v2, n * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
// free device (GPU) memory
cudaFree(gpu_diffusivity);
CUDA_CHECK;
cudaFree(gpu_v1);
CUDA_CHECK;
cudaFree(gpu_v2);
CUDA_CHECK;
// Calculate divergence of a gradient
// allocate GPU memory
float *gpu_divergence;
cudaMalloc(&gpu_v1, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_v2, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_divergence, n*sizeof(float));
CUDA_CHECK;
// copy host memory to device
cudaMemcpy(gpu_v1, v1, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
cudaMemcpy(gpu_v2, v2, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
cudaMemcpy(gpu_divergence, divergence, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
// launch kernel
//dim3 block = dim3(128,1,1);
//dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
aniso_global_div <<<grid,block>>> (gpu_v1, gpu_v2, gpu_divergence, w, h, nc, n);
// copy result back to host (CPU) memory
cudaMemcpy(divergence, gpu_divergence, n * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
// free device (GPU) memory
cudaFree(gpu_v1);
CUDA_CHECK;
cudaFree(gpu_v2);
CUDA_CHECK;
cudaFree(gpu_divergence);
CUDA_CHECK;
// Update image
// allocate GPU memory
//float *gpu_In, *gpu_Out_Gray;
cudaMalloc(&gpu_In, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_Out, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_divergence, n*sizeof(float));
CUDA_CHECK;
cudaMalloc(&gpu_Domain, n*sizeof(int));
CUDA_CHECK;
// copy host memory to device
cudaMemcpy(gpu_In, imgIn, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
cudaMemcpy(gpu_divergence, divergence, n*sizeof(float), cudaMemcpyHostToDevice);
CUDA_CHECK;
cudaMemcpy(gpu_Domain, imgDomain, n*sizeof(int), cudaMemcpyHostToDevice);
CUDA_CHECK;
// launch kernel
update_aniso_diff <<<grid,block>>> (gpu_In, gpu_divergence, gpu_Domain, gpu_Out, tau, w, h, nc, n);
// copy result back to host (CPU) memory
cudaMemcpy(imgOut, gpu_Out, n * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
cudaMemcpy(imgIn, gpu_Out, n * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
// free device (GPU) memory
cudaFree(gpu_In);
CUDA_CHECK;
cudaFree(gpu_divergence);
CUDA_CHECK;
cudaFree(gpu_Domain);
CUDA_CHECK;
cudaFree(gpu_Out);
CUDA_CHECK;
}
delete[] v1;
delete[] v2;
delete[] diffusivity;
delete[] divergence;
delete[] imgOutGray;
}
|
21200685a74fb7d5890058b4034051091aa36dd7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS 512
/* used for debugging */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
if(code != hipSuccess) {
fprintf(stderr, "GPUassert: %s, %s, %d\n", hipGetErrorString(code), file, line);
if(abort) exit(code);
}
}
/* function declarations */
unsigned int getmax(unsigned int *, unsigned int);
__global__ void getmaxcu(unsigned int *, unsigned int *, unsigned int);
int main(int argc, char *argv[])
{
unsigned int size = 0; // The size of the array
unsigned int i; // loop index
unsigned int * numbers; //pointer to the array
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
size = atol(argv[1]);
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers )
{
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++)
numbers[i] = rand() % size;
/* define the number of blocks, a host array for the maxes, and device arrays */
unsigned int *maxes = (unsigned int *)malloc(NUM_BLOCKS * sizeof(unsigned int));
unsigned int *dev_num, *dev_maxes;
/*allocate space on the device */
gpuErrchk(hipMalloc((void**)&dev_num, size * sizeof(unsigned int)));
gpuErrchk(hipMalloc((void**)&dev_maxes, NUM_BLOCKS * sizeof(unsigned int)));
/*do our copies and execute the kernel */
gpuErrchk(hipMemcpy(dev_num, numbers, size * sizeof(unsigned int), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( getmaxcu), dim3(NUM_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, dev_num, dev_maxes, size);
gpuErrchk(hipPeekAtLastError()); //debug info
gpuErrchk(hipDeviceSynchronize());
gpuErrchk(hipMemcpy(maxes, dev_maxes, NUM_BLOCKS * sizeof(unsigned int), hipMemcpyDeviceToHost));
/* free space on the device */
hipFree(dev_num);
hipFree(dev_maxes);
/* the final max calculation is done on the host
* at this point we have few enough values that using the gpu is not necessary */
unsigned int overall_max = 0;
for(i = 0; i < NUM_BLOCKS; ++i) {
if(overall_max < maxes[i])
overall_max = maxes[i];
}
printf(" The maximum number in the array is: %u\n", overall_max);
free(numbers);
free(maxes);
exit(0);
}
/*
input: pointer to an array of long int
number of elements in the array
output: the maximum number of the array
*/
unsigned int getmax(unsigned int num[], unsigned int size)
{
unsigned int i;
unsigned int max = num[0];
for(i = 1; i < size; i++)
if(num[i] > max)
max = num[i];
return( max );
}
__global__ void getmaxcu(unsigned int * g_idata, unsigned int * g_odata, unsigned int size) {
    unsigned int tid = threadIdx.x;
    unsigned int bid = blockIdx.x;
    unsigned int i;
    __shared__ unsigned int sdata[THREADS_PER_BLOCK];
    /* each thread first takes a running max over a grid-sized stride of the
     * input, so arrays larger than NUM_BLOCKS * THREADS_PER_BLOCK are covered */
    unsigned int local_max = 0;
    for(i = bid * blockDim.x + tid; i < size; i += blockDim.x * gridDim.x) {
        if(local_max < g_idata[i])
            local_max = g_idata[i];
    }
    sdata[tid] = local_max;
    __syncthreads();
    /* find the maximum value of each block using a shared-memory tree reduction;
     * the barrier after every halving keeps reads and writes of sdata ordered */
    unsigned int stride;
    for(stride = THREADS_PER_BLOCK / 2; stride > 0; stride >>= 1) {
        if(tid < stride) {
            if(sdata[tid] < sdata[tid + stride])
                sdata[tid] = sdata[tid + stride];
        }
        __syncthreads();
    }
    /*write the result of each block to the output array */
    if(tid == 0)
        g_odata[bid] = sdata[0];
} | 21200685a74fb7d5890058b4034051091aa36dd7.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS 512
/* used for debugging */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if(code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s, %s, %d\n", cudaGetErrorString(code), file, line);
if(abort) exit(code);
}
}
/* function declarations */
unsigned int getmax(unsigned int *, unsigned int);
__global__ void getmaxcu(unsigned int *, unsigned int *, unsigned int);
int main(int argc, char *argv[])
{
unsigned int size = 0; // The size of the array
unsigned int i; // loop index
unsigned int * numbers; //pointer to the array
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
size = atol(argv[1]);
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers )
{
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++)
numbers[i] = rand() % size;
/* define the number of blocks, a host array for the maxes, and device arrays */
unsigned int *maxes = (unsigned int *)malloc(NUM_BLOCKS * sizeof(unsigned int));
unsigned int *dev_num, *dev_maxes;
/*allocate space on the device */
gpuErrchk(cudaMalloc((void**)&dev_num, size * sizeof(unsigned int)));
gpuErrchk(cudaMalloc((void**)&dev_maxes, NUM_BLOCKS * sizeof(unsigned int)));
/*do our copies and execute the kernel */
gpuErrchk(cudaMemcpy(dev_num, numbers, size * sizeof(unsigned int), cudaMemcpyHostToDevice));
getmaxcu<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(dev_num, dev_maxes, size);
gpuErrchk(cudaPeekAtLastError()); //debug info
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(maxes, dev_maxes, NUM_BLOCKS * sizeof(unsigned int), cudaMemcpyDeviceToHost));
/* free space on the device */
cudaFree(dev_num);
cudaFree(dev_maxes);
/* the final max calculation is done on the host
* at this point we have few enough values that using the gpu is not necessary */
unsigned int overall_max = 0;
for(i = 0; i < NUM_BLOCKS; ++i) {
if(overall_max < maxes[i])
overall_max = maxes[i];
}
printf(" The maximum number in the array is: %u\n", overall_max);
free(numbers);
free(maxes);
exit(0);
}
/*
input: pointer to an array of long int
number of elements in the array
output: the maximum number of the array
*/
unsigned int getmax(unsigned int num[], unsigned int size)
{
unsigned int i;
unsigned int max = num[0];
for(i = 1; i < size; i++)
if(num[i] > max)
max = num[i];
return( max );
}
__global__ void getmaxcu(unsigned int * g_idata, unsigned int * g_odata, unsigned int size) {
    unsigned int tid = threadIdx.x;
    unsigned int bid = blockIdx.x;
    unsigned int i;
    __shared__ unsigned int sdata[THREADS_PER_BLOCK];
    /* each thread first takes a running max over a grid-sized stride of the
     * input, so arrays larger than NUM_BLOCKS * THREADS_PER_BLOCK are covered */
    unsigned int local_max = 0;
    for(i = bid * blockDim.x + tid; i < size; i += blockDim.x * gridDim.x) {
        if(local_max < g_idata[i])
            local_max = g_idata[i];
    }
    sdata[tid] = local_max;
    __syncthreads();
    /* find the maximum value of each block using a shared-memory tree reduction;
     * the barrier after every halving keeps reads and writes of sdata ordered */
    unsigned int stride;
    for(stride = THREADS_PER_BLOCK / 2; stride > 0; stride >>= 1) {
        if(tid < stride) {
            if(sdata[tid] < sdata[tid + stride])
                sdata[tid] = sdata[tid + stride];
        }
        __syncthreads();
    }
    /*write the result of each block to the output array */
    if(tid == 0)
        g_odata[bid] = sdata[0];
} |
8ab80bee758c906840a3c6d97e02c92dff684819.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "roi_pooling_op.hpp"
namespace Shadow {
namespace Vision {
template <typename T>
__global__ void KernelROIPooling(const T *in_data, int count, const T *roi_data,
int in_c, int in_h, int in_w, int pooled_h,
int pooled_w, float spatial_scale,
T *out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int pw = globalid % pooled_w;
int ph = (globalid / pooled_w) % pooled_h;
int c = (globalid / pooled_w / pooled_h) % in_c;
int n = globalid / pooled_w / pooled_h / in_c;
roi_data += n * 5;
int roi_batch_id = static_cast<int>(roi_data[0]);
int roi_start_w = static_cast<int>(round(roi_data[1] * spatial_scale));
int roi_start_h = static_cast<int>(round(roi_data[2] * spatial_scale));
int roi_end_w = static_cast<int>(round(roi_data[3] * spatial_scale));
int roi_end_h = static_cast<int>(round(roi_data[4] * spatial_scale));
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
T bin_size_h = roi_height / static_cast<T>(pooled_h);
T bin_size_w = roi_width / static_cast<T>(pooled_w);
int hstart = static_cast<int>(floor(ph * bin_size_h));
int wstart = static_cast<int>(floor(pw * bin_size_w));
int hend = static_cast<int>(ceil((ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil((pw + 1) * bin_size_w));
hstart = min(max(hstart + roi_start_h, 0), in_h);
hend = min(max(hend + roi_start_h, 0), in_h);
wstart = min(max(wstart + roi_start_w, 0), in_w);
wend = min(max(wend + roi_start_w, 0), in_w);
bool is_empty = (hend <= hstart) || (wend <= wstart);
in_data += (roi_batch_id * in_c + c) * in_h * in_w;
T max_val = is_empty ? 0 : in_data[hstart * in_w + wstart];
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
max_val = max(max_val, in_data[h * in_w + w]);
}
}
out_data[globalid] = max_val;
}
}
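// Worked example of the bin arithmetic above: with roi_height = 5 and
// pooled_h = 2, bin_size_h = 2.5, so
//   ph = 0 -> rows [floor(0*2.5), ceil(1*2.5)) = [0, 3)
//   ph = 1 -> rows [floor(1*2.5), ceil(2*2.5)) = [2, 5)
// Adjacent bins may overlap by one row; each bin takes the max over its
// clamped window, and a bin whose clamped window is empty outputs 0.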
template <typename T>
void ROIPooling(const T *in_data, const VecInt &in_shape, const T *roi_data,
int num_rois, int pooled_h, int pooled_w, float spatial_scale,
T *out_data, Context *context) {
int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3];
int count = num_rois * in_c * pooled_h * pooled_w;
hipLaunchKernelGGL(( KernelROIPooling<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0,
hipStream_t(context->cuda_stream()),
in_data, count, roi_data, in_c, in_h, in_w, pooled_h, pooled_w,
spatial_scale, out_data);
CUDA_CHECK(hipPeekAtLastError());
}
template void ROIPooling(const float *, const VecInt &, const float *, int, int,
int, float, float *, Context *);
} // namespace Vision
} // namespace Shadow
| 8ab80bee758c906840a3c6d97e02c92dff684819.cu | #include "roi_pooling_op.hpp"
namespace Shadow {
namespace Vision {
template <typename T>
__global__ void KernelROIPooling(const T *in_data, int count, const T *roi_data,
int in_c, int in_h, int in_w, int pooled_h,
int pooled_w, float spatial_scale,
T *out_data) {
CUDA_KERNEL_LOOP(globalid, count) {
int pw = globalid % pooled_w;
int ph = (globalid / pooled_w) % pooled_h;
int c = (globalid / pooled_w / pooled_h) % in_c;
int n = globalid / pooled_w / pooled_h / in_c;
roi_data += n * 5;
int roi_batch_id = static_cast<int>(roi_data[0]);
int roi_start_w = static_cast<int>(round(roi_data[1] * spatial_scale));
int roi_start_h = static_cast<int>(round(roi_data[2] * spatial_scale));
int roi_end_w = static_cast<int>(round(roi_data[3] * spatial_scale));
int roi_end_h = static_cast<int>(round(roi_data[4] * spatial_scale));
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
T bin_size_h = roi_height / static_cast<T>(pooled_h);
T bin_size_w = roi_width / static_cast<T>(pooled_w);
int hstart = static_cast<int>(floor(ph * bin_size_h));
int wstart = static_cast<int>(floor(pw * bin_size_w));
int hend = static_cast<int>(ceil((ph + 1) * bin_size_h));
int wend = static_cast<int>(ceil((pw + 1) * bin_size_w));
hstart = min(max(hstart + roi_start_h, 0), in_h);
hend = min(max(hend + roi_start_h, 0), in_h);
wstart = min(max(wstart + roi_start_w, 0), in_w);
wend = min(max(wend + roi_start_w, 0), in_w);
bool is_empty = (hend <= hstart) || (wend <= wstart);
in_data += (roi_batch_id * in_c + c) * in_h * in_w;
T max_val = is_empty ? 0 : in_data[hstart * in_w + wstart];
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
max_val = max(max_val, in_data[h * in_w + w]);
}
}
out_data[globalid] = max_val;
}
}
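// Worked example of the bin arithmetic above: with roi_height = 5 and
// pooled_h = 2, bin_size_h = 2.5, so
//   ph = 0 -> rows [floor(0*2.5), ceil(1*2.5)) = [0, 3)
//   ph = 1 -> rows [floor(1*2.5), ceil(2*2.5)) = [2, 5)
// Adjacent bins may overlap by one row; each bin takes the max over its
// clamped window, and a bin whose clamped window is empty outputs 0.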
template <typename T>
void ROIPooling(const T *in_data, const VecInt &in_shape, const T *roi_data,
int num_rois, int pooled_h, int pooled_w, float spatial_scale,
T *out_data, Context *context) {
int in_c = in_shape[1], in_h = in_shape[2], in_w = in_shape[3];
int count = num_rois * in_c * pooled_h * pooled_w;
KernelROIPooling<T><<<GetBlocks(count), NumThreads, 0,
cudaStream_t(context->cuda_stream())>>>(
in_data, count, roi_data, in_c, in_h, in_w, pooled_h, pooled_w,
spatial_scale, out_data);
CUDA_CHECK(cudaPeekAtLastError());
}
template void ROIPooling(const float *, const VecInt &, const float *, int, int,
int, float, float *, Context *);
} // namespace Vision
} // namespace Shadow
|
3383476ea13350c445d85ce44bd4b485a2b5a49f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file RDemoKernel.cu
//---------------------------------------------------------------------------//
#include "RDemoKernel.hh"
#include "base/Assert.hh"
#include "base/KernelParamCalculator.cuda.hh"
#include "geometry/GeoTrackView.hh"
#include "ImageTrackView.hh"
#include <cmath>
using namespace celeritas;
using namespace demo_rasterizer;
namespace demo_rasterizer
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__device__ int geo_id(const GeoTrackView& geo)
{
if (geo.is_outside())
return -1;
return geo.volume_id().get();
}
__global__ void trace_kernel(const GeoParamsCRefDevice geo_params,
const GeoStateRefDevice geo_state,
const ImagePointers image_state)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= image_state.dims[0])
return;
ImageTrackView image(image_state, tid);
GeoTrackView geo(geo_params, geo_state, tid);
// Start track at the leftmost point in the requested direction
geo = GeoTrackInitializer{image.start_pos(), image.start_dir()};
int cur_id = geo_id(geo);
real_type geo_dist = std::fmin(
geo.next_step(), image_state.dims[1] * image_state.pixel_width);
// Track along each pixel, labeling it with the volume that covers the largest part of it
for (unsigned int i = 0; i < image_state.dims[1]; ++i)
{
real_type pix_dist = image_state.pixel_width;
real_type max_dist = 0;
int max_id = cur_id;
while (geo_dist <= pix_dist)
{
// Move to geometry boundary
pix_dist -= geo_dist;
if (max_id == cur_id)
{
max_dist += geo_dist;
}
else if (geo_dist > max_dist)
{
max_dist = geo_dist;
max_id = cur_id;
}
// Cross surface
geo.move_next_step();
cur_id = geo_id(geo);
geo_dist = std::fmin(geo.next_step(),
image_state.dims[1] * image_state.pixel_width);
}
// Move to pixel boundary
geo_dist -= pix_dist;
if (pix_dist > max_dist)
{
max_dist = pix_dist;
max_id = cur_id;
}
image.set_pixel(i, max_id);
}
}
} // namespace
//---------------------------------------------------------------------------//
// KERNEL INTERFACE
//---------------------------------------------------------------------------//
void trace(const GeoParamsCRefDevice& geo_params,
const GeoStateRefDevice& geo_state,
const ImagePointers& image)
{
CELER_EXPECT(image);
static const KernelParamCalculator calc_kernel_params(trace_kernel,
"trace");
auto params = calc_kernel_params(image.dims[0]);
hipLaunchKernelGGL(( trace_kernel), dim3(params.grid_size), dim3(params.block_size), 0, 0,
geo_params, geo_state, image);
CELER_CUDA_CHECK_ERROR();
CELER_CUDA_CALL(hipDeviceSynchronize());
}
//---------------------------------------------------------------------------//
} // namespace demo_rasterizer
| 3383476ea13350c445d85ce44bd4b485a2b5a49f.cu | //---------------------------------*-CUDA-*----------------------------------//
// Copyright 2020 UT-Battelle, LLC, and other Celeritas developers.
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: (Apache-2.0 OR MIT)
//---------------------------------------------------------------------------//
//! \file RDemoKernel.cu
//---------------------------------------------------------------------------//
#include "RDemoKernel.hh"
#include "base/Assert.hh"
#include "base/KernelParamCalculator.cuda.hh"
#include "geometry/GeoTrackView.hh"
#include "ImageTrackView.hh"
#include <cmath>
using namespace celeritas;
using namespace demo_rasterizer;
namespace demo_rasterizer
{
namespace
{
//---------------------------------------------------------------------------//
// KERNELS
//---------------------------------------------------------------------------//
__device__ int geo_id(const GeoTrackView& geo)
{
if (geo.is_outside())
return -1;
return geo.volume_id().get();
}
__global__ void trace_kernel(const GeoParamsCRefDevice geo_params,
const GeoStateRefDevice geo_state,
const ImagePointers image_state)
{
auto tid = celeritas::KernelParamCalculator::thread_id();
if (tid.get() >= image_state.dims[0])
return;
ImageTrackView image(image_state, tid);
GeoTrackView geo(geo_params, geo_state, tid);
// Start track at the leftmost point in the requested direction
geo = GeoTrackInitializer{image.start_pos(), image.start_dir()};
int cur_id = geo_id(geo);
real_type geo_dist = std::fmin(
geo.next_step(), image_state.dims[1] * image_state.pixel_width);
// Track along each pixel, labeling it with the volume that covers the largest part of it
for (unsigned int i = 0; i < image_state.dims[1]; ++i)
{
real_type pix_dist = image_state.pixel_width;
real_type max_dist = 0;
int max_id = cur_id;
while (geo_dist <= pix_dist)
{
// Move to geometry boundary
pix_dist -= geo_dist;
if (max_id == cur_id)
{
max_dist += geo_dist;
}
else if (geo_dist > max_dist)
{
max_dist = geo_dist;
max_id = cur_id;
}
// Cross surface
geo.move_next_step();
cur_id = geo_id(geo);
geo_dist = std::fmin(geo.next_step(),
image_state.dims[1] * image_state.pixel_width);
}
// Move to pixel boundary
geo_dist -= pix_dist;
if (pix_dist > max_dist)
{
max_dist = pix_dist;
max_id = cur_id;
}
image.set_pixel(i, max_id);
}
}
} // namespace
//---------------------------------------------------------------------------//
// KERNEL INTERFACE
//---------------------------------------------------------------------------//
void trace(const GeoParamsCRefDevice& geo_params,
const GeoStateRefDevice& geo_state,
const ImagePointers& image)
{
CELER_EXPECT(image);
static const KernelParamCalculator calc_kernel_params(trace_kernel,
"trace");
auto params = calc_kernel_params(image.dims[0]);
trace_kernel<<<params.grid_size, params.block_size>>>(
geo_params, geo_state, image);
CELER_CUDA_CHECK_ERROR();
CELER_CUDA_CALL(cudaDeviceSynchronize());
}
//---------------------------------------------------------------------------//
} // namespace demo_rasterizer
|
933249a2bad46e56e23f3b0fc382012153e408cd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017 Thomas Heller
//
// SPDX-License-Identifier: BSL-1.0
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <hpx/config.hpp>
// NVCC fails unceremoniously with this test at least until V12.1
#if !defined(HPX_CUDA_VERSION) || (HPX_CUDA_VERSION > 1201)
#include <hpx/chrono.hpp>
#include <hpx/execution.hpp>
#include <hpx/init.hpp>
#include <hpx/modules/async_cuda.hpp>
#include <cstddef>
#include <iostream>
__global__ void dummy() {}
int hpx_main(hpx::program_options::variables_map& vm)
{
std::size_t const iterations = vm["iterations"].as<std::size_t>();
std::size_t const batch_size = 10;
std::size_t const batch_iterations = iterations / batch_size;
std::size_t const non_batch_iterations = iterations % batch_size;
hipStream_t cuda_stream;
hpx::cuda::experimental::check_cuda_error(hipStreamCreate(&cuda_stream));
// Warmup
{
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i != iterations; ++i)
{
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
hpx::cuda::experimental::check_cuda_error(
hipStreamSynchronize(cuda_stream));
}
double elapsed = timer.elapsed();
std::cout
<< "native + synchronize (warmup): "
<< elapsed << '\n';
}
{
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i != iterations; ++i)
{
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
hpx::cuda::experimental::check_cuda_error(
hipStreamSynchronize(cuda_stream));
}
double elapsed = timer.elapsed();
std::cout
<< "native + synchronize: "
<< elapsed << '\n';
}
{
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i < batch_iterations; ++i)
{
for (std::size_t b = 0; b < batch_size; ++b)
{
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
}
hpx::cuda::experimental::check_cuda_error(
hipStreamSynchronize(cuda_stream));
}
for (std::size_t i = 0; i < non_batch_iterations; ++i)
{
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
}
hpx::cuda::experimental::check_cuda_error(
hipStreamSynchronize(cuda_stream));
double elapsed = timer.elapsed();
std::cout
<< "native + synchronize batched: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](hipStream_t cuda_stream) {
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i != iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](hipStream_t cuda_stream) {
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i < batch_iterations; ++i)
{
// We have to manually unroll this loop, because the type of the
// sender changes for each additional transform_stream call. The
// number of unrolled calls must match batch_size above.
cu::transform_stream(ex::just(), f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) | tt::sync_wait();
}
// Do the remainder one-by-one
for (std::size_t i = 0; i < non_batch_iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream batched: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](hipStream_t cuda_stream) {
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i < batch_iterations; ++i)
{
// We have to manually unroll this loop, because the type of the
// sender changes for each additional transform_stream call. The
// number of unrolled calls must match batch_size above. Here we
// intentionally insert dummy then([]{}) calls between the
// transform_stream calls to force synchronization between the
// kernel launches.
cu::transform_stream(ex::just(), f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | tt::sync_wait();
}
// Do the remainder one-by-one
for (std::size_t i = 0; i < non_batch_iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream force synchronize batched: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](hipStream_t cuda_stream) {
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i != iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) |
ex::transfer(ex::thread_pool_scheduler{}) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream with transfer: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](hipStream_t cuda_stream) {
hipLaunchKernelGGL(( dummy), dim3(1), dim3(1), 0, cuda_stream, );
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i < batch_iterations; ++i)
{
// We have to manually unroll this loop, because the type of the
// sender changes for each additional transform_stream call. The
// number of unrolled calls must match batch_size above.
cu::transform_stream(ex::just(), f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
ex::transfer(ex::thread_pool_scheduler{}) | tt::sync_wait();
}
// Do the remainder one-by-one
for (std::size_t i = 0; i < non_batch_iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) |
ex::transfer(ex::thread_pool_scheduler{}) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream with transfer batched: "
<< elapsed << '\n';
}
hpx::cuda::experimental::check_cuda_error(hipStreamDestroy(cuda_stream));
return hpx::local::finalize();
}
int main(int argc, char* argv[])
{
using namespace hpx::program_options;
options_description cmdline("usage: " HPX_APPLICATION_STRING " [options]");
cmdline.add_options()("iterations",
hpx::program_options::value<std::size_t>()->default_value(1024),
"number of iterations (default: 1024)");
hpx::local::init_params init_args;
init_args.desc_cmdline = cmdline;
return hpx::local::init(hpx_main, argc, argv, init_args);
}
#else
int main(int, char*[])
{
return 0;
}
#endif
| 933249a2bad46e56e23f3b0fc382012153e408cd.cu | // Copyright (c) 2017 Thomas Heller
//
// SPDX-License-Identifier: BSL-1.0
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#include <hpx/config.hpp>
// NVCC fails unceremoniously with this test at least until V12.1
#if !defined(HPX_CUDA_VERSION) || (HPX_CUDA_VERSION > 1201)
#include <hpx/chrono.hpp>
#include <hpx/execution.hpp>
#include <hpx/init.hpp>
#include <hpx/modules/async_cuda.hpp>
#include <cstddef>
#include <iostream>
__global__ void dummy() {}
int hpx_main(hpx::program_options::variables_map& vm)
{
std::size_t const iterations = vm["iterations"].as<std::size_t>();
std::size_t const batch_size = 10;
std::size_t const batch_iterations = iterations / batch_size;
std::size_t const non_batch_iterations = iterations % batch_size;
cudaStream_t cuda_stream;
hpx::cuda::experimental::check_cuda_error(cudaStreamCreate(&cuda_stream));
// Warmup
{
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i != iterations; ++i)
{
dummy<<<1, 1, 0, cuda_stream>>>();
hpx::cuda::experimental::check_cuda_error(
cudaStreamSynchronize(cuda_stream));
}
double elapsed = timer.elapsed();
std::cout
<< "native + synchronize (warmup): "
<< elapsed << '\n';
}
{
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i != iterations; ++i)
{
dummy<<<1, 1, 0, cuda_stream>>>();
hpx::cuda::experimental::check_cuda_error(
cudaStreamSynchronize(cuda_stream));
}
double elapsed = timer.elapsed();
std::cout
<< "native + synchronize: "
<< elapsed << '\n';
}
{
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i < batch_iterations; ++i)
{
for (std::size_t b = 0; b < batch_size; ++b)
{
dummy<<<1, 1, 0, cuda_stream>>>();
}
hpx::cuda::experimental::check_cuda_error(
cudaStreamSynchronize(cuda_stream));
}
for (std::size_t i = 0; i < non_batch_iterations; ++i)
{
dummy<<<1, 1, 0, cuda_stream>>>();
}
hpx::cuda::experimental::check_cuda_error(
cudaStreamSynchronize(cuda_stream));
double elapsed = timer.elapsed();
std::cout
<< "native + synchronize batched: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](cudaStream_t cuda_stream) {
dummy<<<1, 1, 0, cuda_stream>>>();
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i != iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](cudaStream_t cuda_stream) {
dummy<<<1, 1, 0, cuda_stream>>>();
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i < batch_iterations; ++i)
{
// We have to manually unroll this loop, because the type of the
// sender changes for each additional transform_stream call. The
// number of unrolled calls must match batch_size above.
cu::transform_stream(ex::just(), f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) | tt::sync_wait();
}
// Do the remainder one-by-one
for (std::size_t i = 0; i < non_batch_iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream batched: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](cudaStream_t cuda_stream) {
dummy<<<1, 1, 0, cuda_stream>>>();
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i < batch_iterations; ++i)
{
// We have to manually unroll this loop, because the type of the
// sender changes for each additional transform_stream call. The
// number of unrolled calls must match batch_size above. Here we
// intentionally insert dummy then([]{}) calls between the
// transform_stream calls to force synchronization between the
// kernel launches.
cu::transform_stream(ex::just(), f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | ex::then([] {}) |
cu::transform_stream(f, cuda_stream) | tt::sync_wait();
}
// Do the remainder one-by-one
for (std::size_t i = 0; i < non_batch_iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream force synchronize batched: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](cudaStream_t cuda_stream) {
dummy<<<1, 1, 0, cuda_stream>>>();
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i != iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) |
ex::transfer(ex::thread_pool_scheduler{}) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream with transfer: "
<< elapsed << '\n';
}
{
hpx::cuda::experimental::enable_user_polling poll("default");
namespace ex = hpx::execution::experimental;
namespace cu = hpx::cuda::experimental;
namespace tt = hpx::this_thread::experimental;
auto const f = [](cudaStream_t cuda_stream) {
dummy<<<1, 1, 0, cuda_stream>>>();
};
hpx::chrono::high_resolution_timer timer;
for (std::size_t i = 0; i < batch_iterations; ++i)
{
// We have to manually unroll this loop, because the type of the
// sender changes for each additional transform_stream call. The
// number of unrolled calls must match batch_size above.
cu::transform_stream(ex::just(), f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
cu::transform_stream(f, cuda_stream) |
ex::transfer(ex::thread_pool_scheduler{}) | tt::sync_wait();
}
// Do the remainder one-by-one
for (std::size_t i = 0; i < non_batch_iterations; ++i)
{
cu::transform_stream(ex::just(), f, cuda_stream) |
ex::transfer(ex::thread_pool_scheduler{}) | tt::sync_wait();
}
double elapsed = timer.elapsed();
std::cout
<< "transform_stream with transfer batched: "
<< elapsed << '\n';
}
hpx::cuda::experimental::check_cuda_error(cudaStreamDestroy(cuda_stream));
return hpx::local::finalize();
}
int main(int argc, char* argv[])
{
using namespace hpx::program_options;
options_description cmdline("usage: " HPX_APPLICATION_STRING " [options]");
cmdline.add_options()("iterations",
hpx::program_options::value<std::size_t>()->default_value(1024),
"number of iterations (default: 1024)");
hpx::local::init_params init_args;
init_args.desc_cmdline = cmdline;
return hpx::local::init(hpx_main, argc, argv, init_args);
}
#else
int main(int, char*[])
{
return 0;
}
#endif
|
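// Aside (hedged sketch, not part of HPX or of the file above): the ten-fold
// manual unrolling in the batched variants is needed because each
// transform_stream application changes the sender's type. A recursive helper
// can generate the same chain at compile time; `chain_n` below is a
// hypothetical illustration, not an HPX facility.
template <std::size_t N, typename Sender, typename F>
auto chain_n(Sender&& s, F const& f, cudaStream_t stream)
{
    namespace cu = hpx::cuda::experimental;
    if constexpr (N == 0)
        return std::forward<Sender>(s);
    else
        return chain_n<N - 1>(
            cu::transform_stream(std::forward<Sender>(s), f, stream), f, stream);
}
// Usage, since batch_size is a compile-time constant in the benchmark:
//   chain_n<batch_size>(ex::just(), f, cuda_stream) | tt::sync_wait();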
d044563e79fe923dd622bbdebfc69159d4009e57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/core/Tensor.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/ThrustAllocator.h>
#include <ATen/native/sparse/hip/SparseHIPApplyUtils.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/SparseTensorUtils.h>
#include <c10/macros/Macros.h>
#include <c10/util/accumulate.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros.h>
#endif
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/binary_search.h>
#include <c10/macros/Macros.h>
namespace at { namespace native {
using namespace at::sparse;
SparseTensor _coalesce_sparse_cuda(const SparseTensor& self) {
int64_t nnz = self._nnz();
TORCH_INTERNAL_ASSERT(!self.is_coalesced());
// NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false,
// we should keep the original tensor intact and do coalesce on a copy of the tensor
if (nnz < 2) {
SparseTensor dst = self.clone();
dst._coalesced_(true);
return dst;
}
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::hip::par(allocator).on(stream);
// Replace instances with
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
Tensor values = self._values();
int64_t sparse_dim = self.sparse_dim();
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
Tensor indices1D = flatten_indices(self._indices(), self.sizes(), true);
Tensor origIndices = at::empty({nnz}, self._indices().options());
Tensor uniqueOffsets = at::empty({nnz}, self._indices().options());
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(indices1D.data_ptr<int64_t>());
thrust_ptr origIndicesIter(origIndices.data_ptr<int64_t>());
thrust_ptr uniqueOffsetsIter(uniqueOffsets.data_ptr<int64_t>());
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(0);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter);
thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter);
thrust::sort_by_key(policy,
indicesIter, indicesIter + nnz,
origIndicesIter, LTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
indices1D.resize_({1, newNnz});
auto newValues_size = values.sizes().vec();
newValues_size[0] = newNnz;
Tensor newValues = at::empty(newValues_size, values.options());
  // If there are no values to copy, skip running the kernel.
if (newValues.numel() > 0) {
const int SZ = 4;
values = values.contiguous();
int64_t stride = c10::multiply_integers(values.sizes().slice(1));
int warp_size = at::cuda::warp_size();
dim3 grid(ceil_div(newNnz, (int64_t) SZ), ceil_div(stride, (int64_t) warp_size*SZ));
dim3 block(warp_size, SZ);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::ComplexHalf, at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool,
values.scalar_type(), "coalesce_sparse_cuda", [&] {
using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>;
hipLaunchKernelGGL(( apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t>), dim3(grid), dim3(block), 0, stream,
uniqueOffsets.data_ptr<int64_t>(),
origIndices.data_ptr<int64_t>(),
values.data_ptr<scalar_t>(),
newValues.data_ptr<scalar_t>(),
nnz,
newNnz,
stride
);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, ceil_div((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream> >>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
// C10_HIP_KERNEL_LAUNCH_CHECK();
////////////////////////////////////////////////////////////
// unflatten indices if necessary
Tensor newIndices;
if (sparse_dim == 1) {
newIndices = indices1D;
} else {
newIndices = at::empty({sparse_dim, newNnz}, origIndices.options());
for (int64_t d = sparse_dim - 1; d >= 0; d--) {
// NB: Not a select, so I can preserve the outer dimension
Tensor indicesSlice = newIndices.narrow(0, d, 1);
indicesSlice.copy_(indices1D);
indices1D.divide_(self.size(d), "trunc");
indicesSlice.add_(indices1D, -self.size(d));
}
}
////////////////////////////////////////////////////////////
// We can use unsafe sparse tensor constructor because the indices do not
// need to be revalidated as we do not add or change indices, just remove
// duplicates.
SparseTensor dst = ::at::native::_sparse_coo_tensor_unsafe(newIndices, newValues, self.sizes())._coalesced_(true);
AT_CUDA_CHECK(hipGetLastError());
return dst;
}
}} // namespace at::native
| d044563e79fe923dd622bbdebfc69159d4009e57.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/core/Tensor.h>
#include <ATen/ceil_div.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/ThrustAllocator.h>
#include <ATen/native/sparse/cuda/SparseCUDAApplyUtils.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/native/NonSymbolicBC.h>
#include <ATen/native/SparseTensorUtils.h>
#include <c10/macros/Macros.h>
#include <c10/util/accumulate.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_coalesce_native.h>
#include <ATen/ops/_sparse_coo_tensor_unsafe_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/zeros.h>
#endif
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/binary_search.h>
#include <c10/macros/Macros.h>
namespace at { namespace native {
using namespace at::sparse;
SparseTensor _coalesce_sparse_cuda(const SparseTensor& self) {
int64_t nnz = self._nnz();
TORCH_INTERNAL_ASSERT(!self.is_coalesced());
// NOTE: Since `coalesce` is not an in-place operation when `is_coalesced` is false,
// we should keep the original tensor intact and do coalesce on a copy of the tensor
if (nnz < 2) {
SparseTensor dst = self.clone();
dst._coalesced_(true);
return dst;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
at::cuda::ThrustAllocator allocator;
auto policy = thrust::cuda::par(allocator).on(stream);
// Replace instances with
// For indices, a simple sort + unique suffices
// For values, we use a custom kernel for segmented reduction (can't use Thrust due to indirection).
Tensor values = self._values();
int64_t sparse_dim = self.sparse_dim();
// indices will be modified by Thrust, so we have to clone or use new storage
// here.
Tensor indices1D = flatten_indices(self._indices(), self.sizes(), true);
Tensor origIndices = at::empty({nnz}, self._indices().options());
Tensor uniqueOffsets = at::empty({nnz}, self._indices().options());
typedef thrust::device_ptr<int64_t> thrust_ptr;
thrust_ptr indicesIter(indices1D.data_ptr<int64_t>());
thrust_ptr origIndicesIter(origIndices.data_ptr<int64_t>());
thrust_ptr uniqueOffsetsIter(uniqueOffsets.data_ptr<int64_t>());
// Fill sortedOrigIndices with sequential indices
thrust::counting_iterator<int64_t> countIterI(0);
thrust::counting_iterator<int64_t> countIterO(0);
thrust::copy(policy, countIterI, countIterI + nnz, origIndicesIter);
thrust::copy(policy, countIterO, countIterO + nnz, uniqueOffsetsIter);
thrust::sort_by_key(policy,
indicesIter, indicesIter + nnz,
origIndicesIter, LTOp<int64_t>()
);
// this forces device-host synchronization!
thrust::pair<thrust_ptr, thrust_ptr> newEnd = thrust::unique_by_key(policy,
indicesIter, indicesIter + nnz,
uniqueOffsetsIter
);
int64_t newNnz = newEnd.first - indicesIter;
indices1D.resize_({1, newNnz});
auto newValues_size = values.sizes().vec();
newValues_size[0] = newNnz;
Tensor newValues = at::empty(newValues_size, values.options());
  // If there are no values to copy, skip running the kernel.
if (newValues.numel() > 0) {
const int SZ = 4;
values = values.contiguous();
int64_t stride = c10::multiply_integers(values.sizes().slice(1));
int warp_size = at::cuda::warp_size();
dim3 grid(ceil_div(newNnz, (int64_t) SZ), ceil_div(stride, (int64_t) warp_size*SZ));
dim3 block(warp_size, SZ);
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
at::ScalarType::ComplexHalf, at::ScalarType::Half, at::ScalarType::BFloat16, at::ScalarType::Bool,
values.scalar_type(), "coalesce_sparse_cuda", [&] {
using cuda_accscalar_t = acc_type<scalar_t, /* is_cuda */ true>;
apply::coalesceValuesKernel<scalar_t, cuda_accscalar_t><<<grid, block, 0, stream>>>(
uniqueOffsets.data_ptr<int64_t>(),
origIndices.data_ptr<int64_t>(),
values.data_ptr<scalar_t>(),
newValues.data_ptr<scalar_t>(),
nnz,
newNnz,
stride
);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
// this grid-strided version is slower but probably more flexible
// to different sizes
// int64_t blockX = min(stride, (int64_t) 512);
// dim3 block(blockX, 512 / blockX);
// int64_t grid = min((int64_t) 1024, ceil_div((int64_t) newNnz * stride, (int64_t) block.x * block.y));
// THCSTensor_coalesceValuesKernel_gridStrided<real, accreal><<<grid, block, 0, stream> >>(
// THCIndexTensor_(data)(state, uniqueOffsets),
// THCIndexTensor_(data)(state, origIndices),
// THCTensor_(data)(state, values),
// THCTensor_(data)(state, newValues),
// nnz,
// newNnz,
// stride
// );
// C10_CUDA_KERNEL_LAUNCH_CHECK();
////////////////////////////////////////////////////////////
// unflatten indices if necessary
Tensor newIndices;
if (sparse_dim == 1) {
newIndices = indices1D;
} else {
newIndices = at::empty({sparse_dim, newNnz}, origIndices.options());
for (int64_t d = sparse_dim - 1; d >= 0; d--) {
// NB: Not a select, so I can preserve the outer dimension
Tensor indicesSlice = newIndices.narrow(0, d, 1);
indicesSlice.copy_(indices1D);
indices1D.divide_(self.size(d), "trunc");
indicesSlice.add_(indices1D, -self.size(d));
}
}
////////////////////////////////////////////////////////////
// We can use unsafe sparse tensor constructor because the indices do not
// need to be revalidated as we do not add or change indices, just remove
// duplicates.
SparseTensor dst = ::at::native::_sparse_coo_tensor_unsafe(newIndices, newValues, self.sizes())._coalesced_(true);
AT_CUDA_CHECK(cudaGetLastError());
return dst;
}
}} // namespace at::native
|
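// Aside (hedged, standalone illustration -- not PyTorch code): the index
// bookkeeping at the heart of _coalesce_sparse_cuda is "sort the flattened
// indices, carry original positions along, then unique". With made-up data:
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <cstdio>
#include <vector>
int main()
{
    std::vector<long long> h = {3, 1, 3, 0};          // flattened indices, one duplicate
    thrust::device_vector<long long> idx(h.begin(), h.end());
    thrust::device_vector<long long> origIndices(idx.size());
    thrust::device_vector<long long> uniqueOffsets(idx.size());
    thrust::sequence(origIndices.begin(), origIndices.end());    // 0,1,2,3
    thrust::sequence(uniqueOffsets.begin(), uniqueOffsets.end());
    // After the sort, origIndices maps each sorted slot back to its source row.
    thrust::sort_by_key(idx.begin(), idx.end(), origIndices.begin());
    // Collapse duplicate keys; uniqueOffsets keeps each run's first sorted slot.
    auto newEnd = thrust::unique_by_key(idx.begin(), idx.end(), uniqueOffsets.begin());
    long long newNnz = newEnd.first - idx.begin();
    std::printf("coalesced nnz = %lld (expected 3)\n", newNnz);
    return 0;
}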
b187f2da042769c2a2dc98603139802a331956a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \file CTU_1D_cuda.cu
* \brief Definitions of the cuda CTU algorithm functions. */
#ifdef CUDA
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include"gpu.hpp"
#include"global.h"
#include"global_cuda.h"
#include"hydro_cuda.h"
#include"CTU_1D_cuda.h"
#include"pcm_cuda.h"
#include"plmp_cuda.h"
#include"plmc_cuda.h"
#include"ppmp_cuda.h"
#include"ppmc_cuda.h"
#include"exact_cuda.h"
#include"roe_cuda.h"
#include"hllc_cuda.h"
#include"cooling_cuda.h"
#include"error_handling.h"
#include"io.h"
Real CTU_Algorithm_1D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int x_off, int n_ghost, Real dx, Real xbound, Real dt, int n_fields)
{
//Here, *host_conserved contains the entire
//set of conserved variables on the grid
//host_conserved0 contains the values at time n
//host_conserved1 will contain the values at time n+1
// Initialize dt values
Real max_dti = 0;
#ifdef COOLING_GPU
Real min_dt = 1e10;
#endif
int n_cells = nx;
int ny = 1;
int nz = 1;
// set the dimensions of the cuda grid
ngrid = (n_cells + TPB - 1) / TPB;
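  // ngrid is assumed to be a file-scope global declared in global_cuda.h;
  // the launch uses one thread per cell.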
dim3 dimGrid(ngrid, 1, 1);
dim3 dimBlock(TPB, 1, 1);
if ( !memory_allocated ) {
// allocate an array on the CPU to hold max_dti returned from each thread block
CudaSafeCall( hipHostMalloc(&host_dti_array, ngrid*sizeof(Real), hipHostMallocDefault) );
#ifdef COOLING_GPU
CudaSafeCall( hipHostMalloc(&host_dt_array, ngrid*sizeof(Real), hipHostMallocDefault) );
#endif
// allocate memory on the GPU
CudaSafeCall( hipMalloc((void**)&dev_conserved, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_Lx, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&Q_Rx, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&F_x, (n_fields)*n_cells*sizeof(Real)) );
CudaSafeCall( hipMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) );
#if defined COOLING_GPU
CudaSafeCall( hipMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) );
#endif
#ifndef DYNAMIC_GPU_ALLOC
    // If memory is allocated once: memory_allocated becomes true and successive timesteps won't allocate memory.
    // If memory is not allocated once: memory_allocated remains false and memory is allocated every timestep.
memory_allocated = true;
#endif
}
// copy the conserved variable array onto the GPU
CudaSafeCall( hipMemcpy(dev_conserved, host_conserved0, n_fields*n_cells*sizeof(Real), hipMemcpyHostToDevice) );
CudaCheckError();
// Step 1: Do the reconstruction
#ifdef PCM
hipLaunchKernelGGL(PCM_Reconstruction_1D, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, n_ghost, gama, n_fields);
CudaCheckError();
#endif
#ifdef PLMP
hipLaunchKernelGGL(PLMP_cuda, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PLMC
hipLaunchKernelGGL(PLMC_cuda, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PPMP
hipLaunchKernelGGL(PPMP_cuda, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PPMC
hipLaunchKernelGGL(PPMC_cuda, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
// Step 2: Calculate the fluxes
#ifdef EXACT
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dimGrid, dimBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama, 0, n_fields);
#endif
#ifdef ROE
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dimGrid, dimBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama, 0, n_fields);
#endif
#ifdef HLLC
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dimGrid, dimBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama, 0, n_fields);
#endif
CudaCheckError();
#ifdef DE
  // Compute the velocity divergence before updating the conserved array; this avoids synchronization issues when adding this term in Update_Conserved_Variables
hipLaunchKernelGGL(Partial_Update_Advected_Internal_Energy_1D, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, n_ghost, dx, dt, gama, n_fields );
#endif
// Step 3: Update the conserved variable array
hipLaunchKernelGGL(Update_Conserved_Variables_1D, dimGrid, dimBlock, 0, 0, dev_conserved, F_x, n_cells, x_off, n_ghost, dx, xbound, dt, gama, n_fields);
CudaCheckError();
  // Synchronize the total and internal energy, if using the dual-energy formalism
#ifdef DE
hipLaunchKernelGGL(Select_Internal_Energy_1D, dimGrid, dimBlock, 0, 0, dev_conserved, nx, n_ghost, n_fields);
hipLaunchKernelGGL(Sync_Energies_1D, dimGrid, dimBlock, 0, 0, dev_conserved, n_cells, n_ghost, gama, n_fields);
CudaCheckError();
#endif
// Apply cooling
#ifdef COOLING_GPU
hipLaunchKernelGGL(cooling_kernel, dimGrid, dimBlock, 0, 0, dev_conserved, nx, ny, nz, n_ghost, n_fields, dt, gama, dev_dti_array);
CudaCheckError();
#endif
// Calculate the next timestep
hipLaunchKernelGGL(Calc_dt_1D, dimGrid, dimBlock, 0, 0, dev_conserved, n_cells, n_ghost, dx, dev_dti_array, gama);
CudaCheckError();
// copy the conserved variable array back to the CPU
CudaSafeCall( hipMemcpy(host_conserved1, dev_conserved, n_fields*n_cells*sizeof(Real), hipMemcpyDeviceToHost) );
// copy the dti array onto the CPU
CudaSafeCall( hipMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) );
// iterate through to find the maximum inverse dt for this subgrid block
for (int i=0; i<ngrid; i++) {
max_dti = fmax(max_dti, host_dti_array[i]);
}
#if defined COOLING_GPU
// copy the dt array from cooling onto the CPU
CudaSafeCall( hipMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), hipMemcpyDeviceToHost) );
// find maximum inverse timestep from cooling time
for (int i=0; i<ngrid; i++) {
min_dt = fmin(min_dt, host_dt_array[i]);
}
if (min_dt < C_cfl/max_dti) {
max_dti = C_cfl/min_dt;
}
#endif
#ifdef DYNAMIC_GPU_ALLOC
// If memory is not single allocated then free the memory every timestep.
Free_Memory_CTU_1D();
#endif
// return the maximum inverse timestep
return max_dti;
}
void Free_Memory_CTU_1D() {
// free the CPU memory
CudaSafeCall( hipHostFree(host_dti_array) );
#if defined COOLING_GPU
CudaSafeCall( hipHostFree(host_dt_array) );
#endif
// free the GPU memory
hipFree(dev_conserved);
hipFree(Q_Lx);
hipFree(Q_Rx);
hipFree(F_x);
hipFree(dev_dti_array);
#if defined COOLING_GPU
hipFree(dev_dt_array);
#endif
}
#endif //CUDA
| b187f2da042769c2a2dc98603139802a331956a2.cu | /*! \file CTU_1D_cuda.cu
* \brief Definitions of the cuda CTU algorithm functions. */
#ifdef CUDA
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include"gpu.hpp"
#include"global.h"
#include"global_cuda.h"
#include"hydro_cuda.h"
#include"CTU_1D_cuda.h"
#include"pcm_cuda.h"
#include"plmp_cuda.h"
#include"plmc_cuda.h"
#include"ppmp_cuda.h"
#include"ppmc_cuda.h"
#include"exact_cuda.h"
#include"roe_cuda.h"
#include"hllc_cuda.h"
#include"cooling_cuda.h"
#include"error_handling.h"
#include"io.h"
Real CTU_Algorithm_1D_CUDA(Real *host_conserved0, Real *host_conserved1, int nx, int x_off, int n_ghost, Real dx, Real xbound, Real dt, int n_fields)
{
//Here, *host_conserved contains the entire
//set of conserved variables on the grid
//host_conserved0 contains the values at time n
//host_conserved1 will contain the values at time n+1
// Initialize dt values
Real max_dti = 0;
#ifdef COOLING_GPU
Real min_dt = 1e10;
#endif
int n_cells = nx;
int ny = 1;
int nz = 1;
// set the dimensions of the cuda grid
ngrid = (n_cells + TPB - 1) / TPB;
dim3 dimGrid(ngrid, 1, 1);
dim3 dimBlock(TPB, 1, 1);
if ( !memory_allocated ) {
// allocate an array on the CPU to hold max_dti returned from each thread block
CudaSafeCall( cudaHostAlloc(&host_dti_array, ngrid*sizeof(Real), cudaHostAllocDefault) );
#ifdef COOLING_GPU
CudaSafeCall( cudaHostAlloc(&host_dt_array, ngrid*sizeof(Real), cudaHostAllocDefault) );
#endif
// allocate memory on the GPU
CudaSafeCall( cudaMalloc((void**)&dev_conserved, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_Lx, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&Q_Rx, n_fields*n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&F_x, (n_fields)*n_cells*sizeof(Real)) );
CudaSafeCall( cudaMalloc((void**)&dev_dti_array, ngrid*sizeof(Real)) );
#if defined COOLING_GPU
CudaSafeCall( cudaMalloc((void**)&dev_dt_array, ngrid*sizeof(Real)) );
#endif
#ifndef DYNAMIC_GPU_ALLOC
    // If memory is allocated once: memory_allocated becomes true and successive timesteps won't allocate memory.
    // If memory is not allocated once: memory_allocated remains false and memory is allocated every timestep.
memory_allocated = true;
#endif
}
// copy the conserved variable array onto the GPU
CudaSafeCall( cudaMemcpy(dev_conserved, host_conserved0, n_fields*n_cells*sizeof(Real), cudaMemcpyHostToDevice) );
CudaCheckError();
// Step 1: Do the reconstruction
#ifdef PCM
hipLaunchKernelGGL(PCM_Reconstruction_1D, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, n_ghost, gama, n_fields);
CudaCheckError();
#endif
#ifdef PLMP
hipLaunchKernelGGL(PLMP_cuda, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PLMC
hipLaunchKernelGGL(PLMC_cuda, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PPMP
hipLaunchKernelGGL(PPMP_cuda, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
#ifdef PPMC
hipLaunchKernelGGL(PPMC_cuda, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, ny, nz, n_ghost, dx, dt, gama, 0, n_fields);
CudaCheckError();
#endif
// Step 2: Calculate the fluxes
#ifdef EXACT
hipLaunchKernelGGL(Calculate_Exact_Fluxes_CUDA, dimGrid, dimBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama, 0, n_fields);
#endif
#ifdef ROE
hipLaunchKernelGGL(Calculate_Roe_Fluxes_CUDA, dimGrid, dimBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama, 0, n_fields);
#endif
#ifdef HLLC
hipLaunchKernelGGL(Calculate_HLLC_Fluxes_CUDA, dimGrid, dimBlock, 0, 0, Q_Lx, Q_Rx, F_x, nx, ny, nz, n_ghost, gama, 0, n_fields);
#endif
CudaCheckError();
#ifdef DE
  // Compute the velocity divergence before updating the conserved array; this avoids synchronization issues when adding this term in Update_Conserved_Variables
hipLaunchKernelGGL(Partial_Update_Advected_Internal_Energy_1D, dimGrid, dimBlock, 0, 0, dev_conserved, Q_Lx, Q_Rx, nx, n_ghost, dx, dt, gama, n_fields );
#endif
// Step 3: Update the conserved variable array
hipLaunchKernelGGL(Update_Conserved_Variables_1D, dimGrid, dimBlock, 0, 0, dev_conserved, F_x, n_cells, x_off, n_ghost, dx, xbound, dt, gama, n_fields);
CudaCheckError();
  // Synchronize the total and internal energy, if using the dual-energy formalism
#ifdef DE
hipLaunchKernelGGL(Select_Internal_Energy_1D, dimGrid, dimBlock, 0, 0, dev_conserved, nx, n_ghost, n_fields);
hipLaunchKernelGGL(Sync_Energies_1D, dimGrid, dimBlock, 0, 0, dev_conserved, n_cells, n_ghost, gama, n_fields);
CudaCheckError();
#endif
// Apply cooling
#ifdef COOLING_GPU
hipLaunchKernelGGL(cooling_kernel, dimGrid, dimBlock, 0, 0, dev_conserved, nx, ny, nz, n_ghost, n_fields, dt, gama, dev_dti_array);
CudaCheckError();
#endif
// Calculate the next timestep
hipLaunchKernelGGL(Calc_dt_1D, dimGrid, dimBlock, 0, 0, dev_conserved, n_cells, n_ghost, dx, dev_dti_array, gama);
CudaCheckError();
// copy the conserved variable array back to the CPU
CudaSafeCall( cudaMemcpy(host_conserved1, dev_conserved, n_fields*n_cells*sizeof(Real), cudaMemcpyDeviceToHost) );
// copy the dti array onto the CPU
CudaSafeCall( cudaMemcpy(host_dti_array, dev_dti_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) );
// iterate through to find the maximum inverse dt for this subgrid block
for (int i=0; i<ngrid; i++) {
max_dti = fmax(max_dti, host_dti_array[i]);
}
#if defined COOLING_GPU
// copy the dt array from cooling onto the CPU
CudaSafeCall( cudaMemcpy(host_dt_array, dev_dt_array, ngrid*sizeof(Real), cudaMemcpyDeviceToHost) );
// find maximum inverse timestep from cooling time
for (int i=0; i<ngrid; i++) {
min_dt = fmin(min_dt, host_dt_array[i]);
}
if (min_dt < C_cfl/max_dti) {
max_dti = C_cfl/min_dt;
}
#endif
#ifdef DYNAMIC_GPU_ALLOC
// If memory is not single allocated then free the memory every timestep.
Free_Memory_CTU_1D();
#endif
// return the maximum inverse timestep
return max_dti;
}
void Free_Memory_CTU_1D() {
// free the CPU memory
CudaSafeCall( cudaFreeHost(host_dti_array) );
#if defined COOLING_GPU
CudaSafeCall( cudaFreeHost(host_dt_array) );
#endif
// free the GPU memory
cudaFree(dev_conserved);
cudaFree(Q_Lx);
cudaFree(Q_Rx);
cudaFree(F_x);
cudaFree(dev_dti_array);
#if defined COOLING_GPU
cudaFree(dev_dt_array);
#endif
}
#endif //CUDA
|
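// Aside (hedged sketch, not Cholla code): Calc_dt_1D is assumed to compute,
// per cell, the inverse CFL time (|v| + c_s)/dx for an ideal gas, after which
// the host reduction above takes the maximum and the driver sets
// dt = C_cfl / max_dti. A host-side version of the per-cell quantity:
#include <math.h>
static double cell_inverse_dt(double rho, double mom, double E,
                              double gamma, double dx)
{
    double v  = mom / rho;                               /* bulk velocity */
    double p  = (gamma - 1.0) * (E - 0.5 * rho * v * v); /* ideal-gas pressure */
    double cs = sqrt(gamma * p / rho);                   /* adiabatic sound speed */
    return (fabs(v) + cs) / dx;  /* fastest signal crossings per unit time */
}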
6a918dafe1d6384f10444b5b951ccc66b24989f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**********************************************************************************
Filename : cuda_RayCastKernels.cu
Authors : Jing Xu, Kevin Wong, Yifan Jian, Marinko Sarunic
Published : January 6th, 2014
Copyright (C) 2014 Biomedical Optics Research Group - Simon Fraser University
This software contains source code provided by NVIDIA Corporation.
This file is part of an Open Source software package. Details of this software have been described
in the papers titled:
"Jing Xu, Kevin Wong, Yifan Jian, and Marinko V. Sarunic.
'Real-time acquisition and display of flow contrast with speckle variance OCT using GPU'
In press (JBO)
and
"Jian, Yifan, Kevin Wong, and Marinko V. Sarunic. 'GPU accelerated OCT processing at
megahertz axial scan rate and high resolution video rate volumetric rendering.'
In SPIE BiOS, pp. 85710Z-85710Z. International Society for Optics and Photonics, 2013."
Please refer to these papers for further information about this software. Redistribution
and modification of this code is restricted to academic purposes ONLY, provided that
the following conditions are met:
- Redistribution of this code must retain the above copyright notice, this list of
conditions and the following disclaimer
- Any use, disclosure, reproduction, or redistribution of this software outside of
academic purposes is strictly prohibited
*DISCLAIMER*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
**********************************************************************************/
/*NVIDIA's Disclaimer
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <helper_math.h>
bool mallocVolumeArray = false;
typedef unsigned int uint;
typedef unsigned char uchar;
hipArray *d_volumeArray = 0;
hipStream_t renderStream;
texture<float, 3, hipReadModeElementType> tex; // 3D texture
typedef struct {
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray {
float3 o; // origin
float3 d; // direction
};
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__global__ void
d_render(float *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float voxelThreshold)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
__shared__ float sum[256];
__shared__ float subtractValue[256];
__shared__ float opacThreshold[256];
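    // Heuristic front-to-back compositing: once a ray has accumulated signal,
    // later samples are attenuated more (subtractValue grows) and the early-exit
    // threshold is lowered (opacThreshold shrinks), emphasizing first surfaces.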
float t = tnear;
    int thrIdx = threadIdx.y * blockDim.x + threadIdx.x; // unique per-thread slot; threadIdx.x alone would alias rows of a 2D block (shared arrays hold 256 entries)
sum[thrIdx] = 0;
subtractValue[thrIdx] = 0;
opacThreshold[thrIdx] = 0.90f;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
for(int i=0; i<maxSteps; i++) {
// read from 3D texture
// remap position to [0, 1] coordinates
float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
sample *= 0.2f;
if (sum[thrIdx]>0.0f) {
subtractValue[thrIdx] += 0.01f;
opacThreshold[thrIdx] -= 0.02f;
}
if (sum[thrIdx]==0.0f && sample > voxelThreshold) {
sum[thrIdx] += sample;
    } else if (sum[thrIdx]>0.0f && sample - subtractValue[thrIdx] > 0.0f) {
sum[thrIdx] += sample - subtractValue[thrIdx];
}
if (sum[thrIdx] >= opacThreshold[thrIdx]) break;
t += tstep;
if (t > tfar) break;
pos += step;
}
d_output[y*imageW + x] = sum[thrIdx];
}
/*************************************************************************************************************************/
/*************************************** END OF KERNELS ***************************************************************/
/*************************************************************************************************************************/
//Initialization for MemcpyDeviceToDevice, for Processing AND Volume Rendering
void initRayCastCuda(void *d_volume, hipExtent volumeSize, hipMemcpyKind memcpyKind)
{
// create 3D array
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
if (!mallocVolumeArray) {
hipStreamCreate(&renderStream);
hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize);
mallocVolumeArray = true;
}
// copy data to 3D array
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(d_volume, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = memcpyKind;
hipMemcpy3D(©Params);
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = hipFilterModeLinear; // linear interpolation
tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
tex.addressMode[1] = hipAddressModeClamp;
// bind array to 3D texture
hipBindTextureToArray(tex, d_volumeArray, channelDesc);
}
void freeVolumeBuffers()
{
hipFreeArray(d_volumeArray);
mallocVolumeArray = false;
}
void rayCast_kernel(dim3 gridSize, dim3 blockSize, float *d_output, int imageW, int imageH,
float density, float brightness, float transferOffset, float transferScale,
float voxelThreshold)
{
hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, renderStream, d_output, imageW, imageH, density,
brightness, transferOffset, transferScale, voxelThreshold);
}
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix);
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
| 6a918dafe1d6384f10444b5b951ccc66b24989f9.cu | /**********************************************************************************
Filename : cuda_RayCastKernels.cu
Authors : Jing Xu, Kevin Wong, Yifan Jian, Marinko Sarunic
Published : January 6th, 2014
Copyright (C) 2014 Biomedical Optics Research Group - Simon Fraser University
This software contains source code provided by NVIDIA Corporation.
This file is part of an Open Source software package. Details of this software have been described
in the papers titled:
"Jing Xu, Kevin Wong, Yifan Jian, and Marinko V. Sarunic.
'Real-time acquisition and display of flow contrast with speckle variance OCT using GPU'
In press (JBO)
and
"Jian, Yifan, Kevin Wong, and Marinko V. Sarunic. 'GPU accelerated OCT processing at
megahertz axial scan rate and high resolution video rate volumetric rendering.'
In SPIE BiOS, pp. 85710Z-85710Z. International Society for Optics and Photonics, 2013."
Please refer to these papers for further information about this software. Redistribution
and modification of this code is restricted to academic purposes ONLY, provided that
the following conditions are met:
- Redistribution of this code must retain the above copyright notice, this list of
conditions and the following disclaimer
- Any use, disclosure, reproduction, or redistribution of this software outside of
academic purposes is strictly prohibited
*DISCLAIMER*
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT OWNERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
**********************************************************************************/
/*NVIDIA's Disclaimer
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <helper_math.h>
bool mallocVolumeArray = false;
typedef unsigned int uint;
typedef unsigned char uchar;
cudaArray *d_volumeArray = 0;
cudaStream_t renderStream;
texture<float, 3, cudaReadModeElementType> tex; // 3D texture
typedef struct {
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray {
float3 o; // origin
float3 d; // direction
};
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
__global__ void
d_render(float *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale, float voxelThreshold)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
__shared__ float sum[256];
__shared__ float subtractValue[256];
__shared__ float opacThreshold[256];
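    // Heuristic front-to-back compositing: once a ray has accumulated signal,
    // later samples are attenuated more (subtractValue grows) and the early-exit
    // threshold is lowered (opacThreshold shrinks), emphasizing first surfaces.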
float t = tnear;
    int thrIdx = threadIdx.y * blockDim.x + threadIdx.x; // unique per-thread slot; threadIdx.x alone would alias rows of a 2D block (shared arrays hold 256 entries)
sum[thrIdx] = 0;
subtractValue[thrIdx] = 0;
opacThreshold[thrIdx] = 0.90f;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
for(int i=0; i<maxSteps; i++) {
// read from 3D texture
// remap position to [0, 1] coordinates
float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
sample *= 0.2f;
if (sum[thrIdx]>0.0f) {
subtractValue[thrIdx] += 0.01f;
opacThreshold[thrIdx] -= 0.02f;
}
if (sum[thrIdx]==0.0f && sample > voxelThreshold) {
sum[thrIdx] += sample;
    } else if (sum[thrIdx]>0.0f && sample - subtractValue[thrIdx] > 0.0f) {
sum[thrIdx] += sample - subtractValue[thrIdx];
}
if (sum[thrIdx] >= opacThreshold[thrIdx]) break;
t += tstep;
if (t > tfar) break;
pos += step;
}
d_output[y*imageW + x] = sum[thrIdx];
}
/*************************************************************************************************************************/
/*************************************** END OF KERNELS ***************************************************************/
/*************************************************************************************************************************/
//Initialization for MemcpyDeviceToDevice, for Processing AND Volume Rendering
void initRayCastCuda(void *d_volume, cudaExtent volumeSize, cudaMemcpyKind memcpyKind)
{
// create 3D array
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
if (!mallocVolumeArray) {
cudaStreamCreate(&renderStream);
cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize);
mallocVolumeArray = true;
}
// copy data to 3D array
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(d_volume, volumeSize.width*sizeof(float), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = memcpyKind;
cudaMemcpy3D(©Params);
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = cudaFilterModeLinear; // linear interpolation
tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
tex.addressMode[1] = cudaAddressModeClamp;
// bind array to 3D texture
cudaBindTextureToArray(tex, d_volumeArray, channelDesc);
}
void freeVolumeBuffers()
{
cudaFreeArray(d_volumeArray);
mallocVolumeArray = false;
}
void rayCast_kernel(dim3 gridSize, dim3 blockSize, float *d_output, int imageW, int imageH,
float density, float brightness, float transferOffset, float transferScale,
float voxelThreshold)
{
d_render<<<gridSize, blockSize, 0, renderStream>>>( d_output, imageW, imageH, density,
brightness, transferOffset, transferScale, voxelThreshold);
}
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix);
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
|
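// Aside (hedged sketch): a typical host-side driver for the launcher above.
// The matrix values and kernel parameters are made-up examples; the row-major
// 3x4 layout with the translation in each row's fourth column is inferred
// from mul() and c_invViewMatrix.
#include <cuda_runtime.h>
void copyInvViewMatrix(float* invViewMatrix, size_t sizeofMatrix);
void rayCast_kernel(dim3 gridSize, dim3 blockSize, float* d_output,
                    int imageW, int imageH, float density, float brightness,
                    float transferOffset, float transferScale, float voxelThreshold);
void render_frame(float* d_output, int imageW, int imageH)
{
    // Identity rotation, camera pulled back 4 units along +z (example values).
    float invView[12] = { 1, 0, 0, 0,
                          0, 1, 0, 0,
                          0, 0, 1, 4 };
    copyInvViewMatrix(invView, sizeof(invView));
    dim3 block(16, 16);
    dim3 grid((imageW + block.x - 1) / block.x,
              (imageH + block.y - 1) / block.y);
    rayCast_kernel(grid, block, d_output, imageW, imageH,
                   /*density=*/0.05f, /*brightness=*/1.0f,
                   /*transferOffset=*/0.0f, /*transferScale=*/1.0f,
                   /*voxelThreshold=*/0.2f);
}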
48d17d555cd7b1ff23ac7e80cc19bce944379ae9.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/types.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHAtomics.cuh>
#include <THH/THHDeviceUtils.cuh>
typedef unsigned long uint64;
typedef unsigned int uint32;
typedef unsigned short uint16;
typedef unsigned char uint8;
#define _FLT_MAX 3.402823466e+38F
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N){
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void AlignFeatureKernel(
const uint32 nthreads,
const torch::PackedTensorAccessor<Dtype, 4, torch::RestrictPtrTraits, size_t> feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
torch::PackedTensorAccessor<uint8, 2, torch::RestrictPtrTraits, size_t> mainDirection_data,
torch::PackedTensorAccessor<Dtype, 4, torch::RestrictPtrTraits, size_t> aligned_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint8 l;
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
Dtype maxVal = -_FLT_MAX;
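    // Pass 1: arg-max over the nOrientation responses of feature j to find
    // its dominant orientation.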
for (l = 0; l < nOrientation; l++) {
Dtype val = feature_data[i][j * nOrientation + l][0][0];
if (val > maxVal) {
maxVal = val;
mainDirection_data[i][j] = l;
}
}
for (l = 0; l < nOrientation; l++) {
Dtype src = feature_data[i][j * nOrientation + l][0][0];
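      // Circular shift so the dominant orientation lands in slot 0; adding
      // nOrientation keeps the modulo operand non-negative.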
uint8 alignedIndex = (l - mainDirection_data[i][j] + nOrientation) % nOrientation;
aligned_data[i][j * nOrientation + alignedIndex][0][0] = src;
}
}
}
std::vector<torch::Tensor> RIE_AlignFeature_forward_cuda(
const torch::Tensor feature,
const uint8 nOrientation)
{
AT_ASSERTM(feature.type().is_cuda(), "feature must be a CUDA tensor");
  AT_ASSERTM((feature.size(2) == 1) and (feature.size(3) == 1), "feature must have size 1 in dims 2 and 3");
const uint16 nBatch = feature.size(0);
const uint16 nChannel = feature.size(1);
const uint16 nFeature = nChannel / nOrientation;
const uint32 count = nBatch * nFeature;
const auto feature_data = feature;
auto mainDirection_data = torch::zeros({nBatch, nFeature}, feature.options().dtype(at::kByte).device(at::kCUDA));
auto aligned_data = torch::zeros({nBatch, nChannel, feature.size(2), feature.size(3)}, feature.options().dtype(at::kFloat).device(at::kCUDA));
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES(feature.type(), "rie_cuda_forward", [&] {
hipLaunchKernelGGL(( AlignFeatureKernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count,
feature_data.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
nBatch,
nFeature,
nOrientation,
mainDirection_data.packed_accessor<uint8, 2, torch::RestrictPtrTraits, size_t>(),
aligned_data.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>());
});
THCudaCheck(hipGetLastError());
return {aligned_data, mainDirection_data};
}
template <typename Dtype>
__global__ void UnAlignFeatureKernel(
const uint32 nthreads,
const torch::PackedTensorAccessor<Dtype, 4, torch::RestrictPtrTraits, size_t> feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
torch::PackedTensorAccessor<uint8, 2, torch::RestrictPtrTraits, size_t> mainDirection_data,
torch::PackedTensorAccessor<Dtype, 4, torch::RestrictPtrTraits, size_t> unaligned_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint8 l;
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
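// Shift the orientation channels forward by the stored main direction, i.e. the inverse of the alignment rotation.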
for (l = 0; l < nOrientation; l++) {
Dtype src = feature_data[i][j * nOrientation + l][0][0];
uint8 alignedIndex = (l + mainDirection_data[i][j]) % nOrientation;
unaligned_data[i][j * nOrientation + alignedIndex][0][0] = src;
}
}
}
torch::Tensor RIE_AlignFeature_backward_cuda(
const torch::Tensor feature, //feature is the gradient of the aligned output
const torch::Tensor mainDirection,
const uint8 nOrientation)
{
AT_ASSERTM(feature.type().is_cuda(), "feature must be a CUDA tensor");
AT_ASSERTM((feature.size(2) == 1) and (feature.size(3) == 1), "feature must be 1-D tensor in dim=2, 3");
const uint16 nBatch = feature.size(0);
const uint16 nChannel = feature.size(1);
const uint16 nFeature = nChannel / nOrientation;
const uint32 count = nBatch * nFeature;
const auto feature_data = feature;
const auto mainDirection_data = mainDirection;
auto unaligned_data = torch::zeros({nBatch, nChannel, feature.size(2), feature.size(3)}, feature.options().dtype(at::kFloat).device(at::kCUDA));
//feature is the align output grad paras
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES(feature.type(), "rie_cuda_backward", [&] {
hipLaunchKernelGGL(( UnAlignFeatureKernel<scalar_t>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, stream ,
count,
feature_data.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
nBatch,
nFeature,
nOrientation,
mainDirection_data.packed_accessor<uint8, 2, torch::RestrictPtrTraits, size_t>(),
unaligned_data.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>());
});
THCudaCheck(hipGetLastError());
return unaligned_data;
} | 48d17d555cd7b1ff23ac7e80cc19bce944379ae9.cu | #include <torch/types.h>
#include <stdio.h>
#include <cuda.h>
#include <vector>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCAtomics.cuh>
#include <THC/THCDeviceUtils.cuh>
typedef unsigned long uint64;
typedef unsigned int uint32;
typedef unsigned short uint16;
typedef unsigned char uint8;
#define _FLT_MAX 3.402823466e+38F
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
const int CUDA_NUM_THREADS = 1024;
inline int GET_BLOCKS(const int N){
return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
template <typename Dtype>
__global__ void AlignFeatureKernel(
const uint32 nthreads,
const torch::PackedTensorAccessor<Dtype, 4, torch::RestrictPtrTraits, size_t> feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
torch::PackedTensorAccessor<uint8, 2, torch::RestrictPtrTraits, size_t> mainDirection_data,
torch::PackedTensorAccessor<Dtype, 4, torch::RestrictPtrTraits, size_t> aligned_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint8 l;
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
Dtype maxVal = -_FLT_MAX;
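// First pass: find the orientation channel with the strongest response; its index becomes this cell's main direction.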
for (l = 0; l < nOrientation; l++) {
Dtype val = feature_data[i][j * nOrientation + l][0][0];
if (val > maxVal) {
maxVal = val;
mainDirection_data[i][j] = l;
}
}
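// Second pass: circularly shift the orientation channels so the main direction lands at index 0.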
for (l = 0; l < nOrientation; l++) {
Dtype src = feature_data[i][j * nOrientation + l][0][0];
uint8 alignedIndex = (l - mainDirection_data[i][j] + nOrientation) % nOrientation;
aligned_data[i][j * nOrientation + alignedIndex][0][0] = src;
}
}
}
std::vector<torch::Tensor> RIE_AlignFeature_forward_cuda(
const torch::Tensor feature,
const uint8 nOrientation)
{
AT_ASSERTM(feature.type().is_cuda(), "feature must be a CUDA tensor");
AT_ASSERTM((feature.size(2) == 1) and (feature.size(3) == 1), "feature must be 1-D tensor in dim=2, 3");
const uint16 nBatch = feature.size(0);
const uint16 nChannel = feature.size(1);
const uint16 nFeature = nChannel / nOrientation;
const uint32 count = nBatch * nFeature;
const auto feature_data = feature;
auto mainDirection_data = torch::zeros({nBatch, nFeature}, feature.options().dtype(at::kByte).device(at::kCUDA));
auto aligned_data = torch::zeros({nBatch, nChannel, feature.size(2), feature.size(3)}, feature.options().dtype(at::kFloat).device(at::kCUDA));
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES(feature.type(), "rie_cuda_forward", [&] {
AlignFeatureKernel<scalar_t> <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>(
count,
feature_data.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
nBatch,
nFeature,
nOrientation,
mainDirection_data.packed_accessor<uint8, 2, torch::RestrictPtrTraits, size_t>(),
aligned_data.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>());
});
THCudaCheck(cudaGetLastError());
return {aligned_data, mainDirection_data};
}
template <typename Dtype>
__global__ void UnAlignFeatureKernel(
const uint32 nthreads,
const torch::PackedTensorAccessor<Dtype, 4, torch::RestrictPtrTraits, size_t> feature_data,
const uint16 nBatch,
const uint16 nFeature,
const uint8 nOrientation,
torch::PackedTensorAccessor<uint8, 2, torch::RestrictPtrTraits, size_t> mainDirection_data,
torch::PackedTensorAccessor<Dtype, 4, torch::RestrictPtrTraits, size_t> unaligned_data)
{
CUDA_KERNEL_LOOP(n, nthreads) {
uint8 l;
const uint16 j = n % nFeature;
const uint16 i = n / nFeature;
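// Shift the orientation channels forward by the stored main direction, i.e. the inverse of the alignment rotation.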
for (l = 0; l < nOrientation; l++) {
Dtype src = feature_data[i][j * nOrientation + l][0][0];
uint8 alignedIndex = (l + mainDirection_data[i][j]) % nOrientation;
unaligned_data[i][j * nOrientation + alignedIndex][0][0] = src;
}
}
}
torch::Tensor RIE_AlignFeature_backward_cuda(
const torch::Tensor feature, //feature is the gradient of the aligned output
const torch::Tensor mainDirection,
const uint8 nOrientation)
{
AT_ASSERTM(feature.type().is_cuda(), "feature must be a CUDA tensor");
AT_ASSERTM((feature.size(2) == 1) and (feature.size(3) == 1), "feature must be 1-D tensor in dim=2, 3");
const uint16 nBatch = feature.size(0);
const uint16 nChannel = feature.size(1);
const uint16 nFeature = nChannel / nOrientation;
const uint32 count = nBatch * nFeature;
const auto feature_data = feature;
const auto mainDirection_data = mainDirection;
auto unaligned_data = torch::zeros({nBatch, nChannel, feature.size(2), feature.size(3)}, feature.options().dtype(at::kFloat).device(at::kCUDA));
//feature is the gradient of the aligned output
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES(feature.type(), "rie_cuda_backward", [&] {
UnAlignFeatureKernel<scalar_t> <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, stream >>>(
count,
feature_data.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>(),
nBatch,
nFeature,
nOrientation,
mainDirection_data.packed_accessor<uint8, 2, torch::RestrictPtrTraits, size_t>(),
unaligned_data.packed_accessor<scalar_t, 4, torch::RestrictPtrTraits, size_t>());
});
THCudaCheck(cudaGetLastError());
return unaligned_data;
} |
95cd074e5e94efb0d6aac2c985ba0f65aa8a2e0d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "adjusthue_impl.cuh"
#include <algorithm>
#include <cmath>
struct RgbTuple {
float cu_r;
float cu_g;
float cu_b;
};
struct HsvTuple {
float cu_h;
float cu_s;
float cu_v;
};
__device__ __forceinline__ HsvTuple rgb2hsv_cuda(const float cu_r, const float cu_g, const float cu_b) {
HsvTuple tuple;
const float cu_M = max(cu_r, max(cu_g, cu_b));
const float cu_m = min(cu_r, min(cu_g, cu_b));
const float cu_chroma = cu_M - cu_m;
float cu_h = 0.0f;
float cu_s = 0.0f;
if (cu_chroma > 0.0f) {
if (cu_M == cu_r) {
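// Red is the max channel: hue = ((g - b) / chroma mod 6) / 6; the copysignf/fmodf pair implements a non-negative modulo.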
const float cu_num = (cu_g - cu_b) / cu_chroma;
const float cu_sign = copysignf(1.0f, cu_num);
cu_h = ((cu_sign < 0.0f) * 6.0f + cu_sign * fmodf(cu_sign * cu_num, 6.0f)) / 6.0f;
} else if (cu_M == cu_g) {
cu_h = ((cu_b - cu_r) / cu_chroma + 2.0f) / 6.0f;
} else {
cu_h = ((cu_r - cu_g) / cu_chroma + 4.0f) / 6.0f;
}
} else {
cu_h = 0.0f;
}
if (cu_M > 0.0f) {
cu_s = cu_chroma / cu_M;
} else {
cu_s = 0.0f;
}
tuple.cu_h = cu_h;
tuple.cu_s = cu_s;
tuple.cu_v = cu_M;
return tuple;
}
__device__ __forceinline__ RgbTuple hsv2rgb_cuda(const float cu_h, const float cu_s, const float cu_v) {
RgbTuple tuple;
const float cu_new_h = cu_h * 6.0f;
const float cu_chroma = cu_v * cu_s;
const float cu_x = cu_chroma * (1.0f - fabsf(fmodf(cu_new_h, 2.0f) - 1.0f));
const float cu_new_m = cu_v - cu_chroma;
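// Branchless sector selection: exactly one of the six flags below is set, choosing how chroma, x and m combine for each channel.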
const bool cu_between_0_and_1 = cu_new_h >= 0.0f && cu_new_h < 1.0f;
const bool cu_between_1_and_2 = cu_new_h >= 1.0f && cu_new_h < 2.0f;
const bool cu_between_2_and_3 = cu_new_h >= 2.0f && cu_new_h < 3.0f;
const bool cu_between_3_and_4 = cu_new_h >= 3.0f && cu_new_h < 4.0f;
const bool cu_between_4_and_5 = cu_new_h >= 4.0f && cu_new_h < 5.0f;
const bool cu_between_5_and_6 = cu_new_h >= 5.0f && cu_new_h < 6.0f;
tuple.cu_r = cu_chroma * static_cast<float>(cu_between_0_and_1 || cu_between_5_and_6) +
cu_x * static_cast<float>(cu_between_1_and_2 || cu_between_4_and_5) + cu_new_m;
tuple.cu_g = cu_chroma * static_cast<float>(cu_between_1_and_2 || cu_between_2_and_3) +
cu_x * static_cast<float>(cu_between_0_and_1 || cu_between_3_and_4) + cu_new_m;
tuple.cu_b = cu_chroma * static_cast<float>(cu_between_3_and_4 || cu_between_4_and_5) +
cu_x * static_cast<float>(cu_between_2_and_3 || cu_between_5_and_6) + cu_new_m;
return tuple;
}
template <typename T>
__global__ void CalAdjustHueKernel(const size_t cu_input_elements, const T *cu_input, T *cu_output,
const float *cu_hue_delta) {
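// Pixels are interleaved RGB, so each thread processes 3 consecutive floats per iteration and strides by 3 * total thread count.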
for (int idx = (blockIdx.x * blockDim.x + threadIdx.x) * 3; idx < cu_input_elements;
idx += gridDim.x * blockDim.x * 3) {
const HsvTuple hsv = rgb2hsv_cuda(static_cast<float>(cu_input[idx]), static_cast<float>(cu_input[idx + 1]),
static_cast<float>(cu_input[idx + 2]));
float cu_new_h = hsv.cu_h;
float cu_new_s = hsv.cu_s;
float cu_new_v = hsv.cu_v;
const float cu_delta = *cu_hue_delta;
cu_new_h = fmodf(hsv.cu_h + cu_delta, 1.0f);
if (cu_new_h < 0.0f) {
cu_new_h = fmodf(1.0f + cu_new_h, 1.0f);
}
const RgbTuple rgb = hsv2rgb_cuda(cu_new_h, cu_new_s, cu_new_v);
cu_output[idx] = static_cast<T>(rgb.cu_r);
cu_output[idx + 1] = static_cast<T>(rgb.cu_g);
cu_output[idx + 2] = static_cast<T>(rgb.cu_b);
}
}
template <typename T>
hipError_t CalAdjusthue(const int input_elements, const T *input, T *output, const float *hue_delta,
const uint32_t &device_id, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( CalAdjustHueKernel), dim3(CUDA_BLOCKS(device_id, input_elements)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream,
input_elements, input, output, hue_delta);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT hipError_t CalAdjusthue<float>(const int input_elements, const float *input, float *output,
const float *hue_delta, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t CalAdjusthue<half>(const int input_elements, const half *input, half *output,
const float *hue_delta, const uint32_t &device_id,
hipStream_t cuda_stream);
template CUDA_LIB_EXPORT hipError_t CalAdjusthue<double>(const int input_elements, const double *input, double *output,
const float *hue_delta, const uint32_t &device_id,
hipStream_t cuda_stream);
| 95cd074e5e94efb0d6aac2c985ba0f65aa8a2e0d.cu | /**
* Copyright 2022 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "adjusthue_impl.cuh"
#include <algorithm>
#include <cmath>
struct RgbTuple {
float cu_r;
float cu_g;
float cu_b;
};
struct HsvTuple {
float cu_h;
float cu_s;
float cu_v;
};
__device__ __forceinline__ HsvTuple rgb2hsv_cuda(const float cu_r, const float cu_g, const float cu_b) {
HsvTuple tuple;
const float cu_M = max(cu_r, max(cu_g, cu_b));
const float cu_m = min(cu_r, min(cu_g, cu_b));
const float cu_chroma = cu_M - cu_m;
float cu_h = 0.0f;
float cu_s = 0.0f;
if (cu_chroma > 0.0f) {
if (cu_M == cu_r) {
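// Red is the max channel: hue = ((g - b) / chroma mod 6) / 6; the copysignf/fmodf pair implements a non-negative modulo.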
const float cu_num = (cu_g - cu_b) / cu_chroma;
const float cu_sign = copysignf(1.0f, cu_num);
cu_h = ((cu_sign < 0.0f) * 6.0f + cu_sign * fmodf(cu_sign * cu_num, 6.0f)) / 6.0f;
} else if (cu_M == cu_g) {
cu_h = ((cu_b - cu_r) / cu_chroma + 2.0f) / 6.0f;
} else {
cu_h = ((cu_r - cu_g) / cu_chroma + 4.0f) / 6.0f;
}
} else {
cu_h = 0.0f;
}
if (cu_M > 0.0f) {
cu_s = cu_chroma / cu_M;
} else {
cu_s = 0.0f;
}
tuple.cu_h = cu_h;
tuple.cu_s = cu_s;
tuple.cu_v = cu_M;
return tuple;
}
__device__ __forceinline__ RgbTuple hsv2rgb_cuda(const float cu_h, const float cu_s, const float cu_v) {
RgbTuple tuple;
const float cu_new_h = cu_h * 6.0f;
const float cu_chroma = cu_v * cu_s;
const float cu_x = cu_chroma * (1.0f - fabsf(fmodf(cu_new_h, 2.0f) - 1.0f));
const float cu_new_m = cu_v - cu_chroma;
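// Branchless sector selection: exactly one of the six flags below is set, choosing how chroma, x and m combine for each channel.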
const bool cu_between_0_and_1 = cu_new_h >= 0.0f && cu_new_h < 1.0f;
const bool cu_between_1_and_2 = cu_new_h >= 1.0f && cu_new_h < 2.0f;
const bool cu_between_2_and_3 = cu_new_h >= 2.0f && cu_new_h < 3.0f;
const bool cu_between_3_and_4 = cu_new_h >= 3.0f && cu_new_h < 4.0f;
const bool cu_between_4_and_5 = cu_new_h >= 4.0f && cu_new_h < 5.0f;
const bool cu_between_5_and_6 = cu_new_h >= 5.0f && cu_new_h < 6.0f;
tuple.cu_r = cu_chroma * static_cast<float>(cu_between_0_and_1 || cu_between_5_and_6) +
cu_x * static_cast<float>(cu_between_1_and_2 || cu_between_4_and_5) + cu_new_m;
tuple.cu_g = cu_chroma * static_cast<float>(cu_between_1_and_2 || cu_between_2_and_3) +
cu_x * static_cast<float>(cu_between_0_and_1 || cu_between_3_and_4) + cu_new_m;
tuple.cu_b = cu_chroma * static_cast<float>(cu_between_3_and_4 || cu_between_4_and_5) +
cu_x * static_cast<float>(cu_between_2_and_3 || cu_between_5_and_6) + cu_new_m;
return tuple;
}
template <typename T>
__global__ void CalAdjustHueKernel(const size_t cu_input_elements, const T *cu_input, T *cu_output,
const float *cu_hue_delta) {
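// Pixels are interleaved RGB, so each thread processes 3 consecutive floats per iteration and strides by 3 * total thread count.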
for (int idx = (blockIdx.x * blockDim.x + threadIdx.x) * 3; idx < cu_input_elements;
idx += gridDim.x * blockDim.x * 3) {
const HsvTuple hsv = rgb2hsv_cuda(static_cast<float>(cu_input[idx]), static_cast<float>(cu_input[idx + 1]),
static_cast<float>(cu_input[idx + 2]));
float cu_new_h = hsv.cu_h;
float cu_new_s = hsv.cu_s;
float cu_new_v = hsv.cu_v;
const float cu_delta = *cu_hue_delta;
cu_new_h = fmodf(hsv.cu_h + cu_delta, 1.0f);
if (cu_new_h < 0.0f) {
cu_new_h = fmodf(1.0f + cu_new_h, 1.0f);
}
const RgbTuple rgb = hsv2rgb_cuda(cu_new_h, cu_new_s, cu_new_v);
cu_output[idx] = static_cast<T>(rgb.cu_r);
cu_output[idx + 1] = static_cast<T>(rgb.cu_g);
cu_output[idx + 2] = static_cast<T>(rgb.cu_b);
}
}
template <typename T>
cudaError_t CalAdjusthue(const int input_elements, const T *input, T *output, const float *hue_delta,
const uint32_t &device_id, cudaStream_t cuda_stream) {
CalAdjustHueKernel<<<CUDA_BLOCKS(device_id, input_elements), CUDA_THREADS(device_id), 0, cuda_stream>>>(
input_elements, input, output, hue_delta);
CHECK_CUDA_LAUNCH_SUCCESS();
}
template CUDA_LIB_EXPORT cudaError_t CalAdjusthue<float>(const int input_elements, const float *input, float *output,
const float *hue_delta, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t CalAdjusthue<half>(const int input_elements, const half *input, half *output,
const float *hue_delta, const uint32_t &device_id,
cudaStream_t cuda_stream);
template CUDA_LIB_EXPORT cudaError_t CalAdjusthue<double>(const int input_elements, const double *input, double *output,
const float *hue_delta, const uint32_t &device_id,
cudaStream_t cuda_stream);
|
aaff420788423352892812e36ad42579aa872bb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by salmon on 16-9-6.
//
extern "C"
{
#include <assert.h>
#include "../../spParallel.h"
#include "../../spMesh.h"
#include "../../spField.h"
#include "../../spParticle.h"
#include "../../spAlogorithm.h"
#include "../spParticle.impl.h"
#include "../sp_device.h"
#include "spParallelCUDA.h"
}
#include </usr/local/cuda/include/hiprand/hiprand_kernel.h>
#include "../../spDataType.h"
__global__ void
spParticleBucketInitialize_kernel(dim3 start,
dim3 count,
dim3 strides,
int num_pic,
size_type *start_pos,
size_type *f_count)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint z = __umul24(blockIdx.z, blockDim.z) + threadIdx.z;
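// Assign every cell in the [start, start+count) sub-domain a contiguous slab of num_pic particle slots, all marked occupied.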
if (x < count.x && y < count.y && z < count.z)
{
uint s = __umul24(start.x + x, strides.x) +
__umul24(start.y + y, strides.y) +
__umul24(start.z + z, strides.z);
start_pos[s] = (x * count.y * count.z + y * count.z + z) * num_pic;
f_count[s] = (size_type) num_pic;
}
}
int spParticleBucketInitialize_device(spParticle *sp)
{
spMesh const *m = spMeshAttributeGetMesh((spMeshAttribute const *) sp);
int iform = spMeshAttributeGetForm((spMeshAttribute const *) sp);
size_type num_of_cell = spMeshGetNumberOfEntities(m, SP_DOMAIN_ALL, iform);
size_type num_of_pic = spParticleGetPIC(sp);
size_type *bucket_start, *bucket_count, *sorted_id, *cell_hash;
SP_CALL(spParticleGetBucket(sp, &bucket_start, &bucket_count, &sorted_id, &cell_hash));
SP_CALL(spFillSeq(sorted_id, SP_TYPE_size_type, spParticleCapacity(sp), 0, 1));
SP_CALL(spMemorySet(cell_hash, -1, spParticleCapacity(sp) * sizeof(size_type)));
size_type m_start[3], m_end[3], m_count[3], m_strides[3];
SP_CALL(spMeshGetDomain(m, SP_DOMAIN_CENTER, m_start, m_end, m_count));
SP_CALL(spMeshGetStrides(m, m_strides));
size_type block_dim[3], grid_dim[3];
SP_CALL(spMeshGetDims(m, grid_dim));
SP_CALL(spParallelThreadBlockDecompose(NUMBER_OF_THREADS_PER_BLOCK, grid_dim, block_dim));
SP_CALL_DEVICE_KERNEL(spParticleBucketInitialize_kernel,
sizeType2Dim3(grid_dim), sizeType2Dim3(block_dim),
sizeType2Dim3(m_start), sizeType2Dim3(m_count), sizeType2Dim3(m_strides),
num_of_pic, bucket_start, bucket_count);
return SP_SUCCESS;
}
/**
* copy from cuda example/particle
*/
__global__ void
spParticleBucketBuild_kernel(size_type *cellStart, // output: cell start index
size_type *cellEnd, // output: cell end index
size_type const *gridParticleHash, // input: sorted grid hashes
size_type const *gridParticleIndex,// input: sorted particle indices
size_type numParticles, size_type num_cell)
{
extern __shared__ size_type sharedHash[]; // blockSize + 1 elements
uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
size_type hash;
// handle case when no. of particles not multiple of block size
if (index < numParticles)
{
hash = gridParticleHash[index];
// Load hash data into shared memory so that we can look
// at neighboring particle's hash entity without loading
// two hash values per thread
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
sharedHash[0] = gridParticleHash[index - 1];
}
}
spParallelSyncThreads();
if (index < numParticles)
{
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
if (index == 0 || hash != sharedHash[threadIdx.x])
{
assert(hash + 1 <= num_cell);
cellStart[hash + 1] = index;
if (index > 0)
{
assert(sharedHash[threadIdx.x] + 1 <= num_cell);
cellEnd[sharedHash[threadIdx.x] + 1] = index;
}
}
if (index == numParticles - 1)
{
assert(hash + 1 <= num_cell);
cellEnd[hash + 1] = index + 1;
}
}
}
__global__ void
_CopyBucketStartCount_kernel(size_type *b_start, // input: cell start index (stored at hash+1)
size_type *b_end, // input: cell end index (stored at hash+1)
size_type *start,
size_type *count,
size_type num_of_cell)
{
size_type index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (index < num_of_cell)
{
start[index] = b_start[index + 1];
count[index] = b_end[index + 1] - b_start[index + 1];
assert(b_end[index + 1] >= b_start[index + 1]);
}
}
__host__
int spParticleBucketBuild_device(spParticle *sp)
{
assert(spParticleNeedSorting(sp) == SP_FALSE);
spMesh const *m = spMeshAttributeGetMesh((spMeshAttribute const *) sp);
int iform = spMeshAttributeGetForm((spMeshAttribute const *) sp);
size_type num_of_cell = spMeshGetNumberOfEntities(m, SP_DOMAIN_ALL, iform);
size_type num_of_particle = spParticleSize(sp);
size_type *bucket_start, *bucket_count, *sorted_idx, *cell_hash;
SP_CALL(spParticleGetBucket(sp, &bucket_start, &bucket_count, &sorted_idx, &cell_hash));
size_type *b_start, *b_end;
SP_CALL(spMemoryDeviceAlloc((void **) &b_end, (num_of_cell + 1) * sizeof(size_type)));
SP_CALL(spMemoryDeviceAlloc((void **) &b_start, (num_of_cell + 1) * sizeof(size_type)));
SP_CALL(spMemorySet(b_start, 0, (num_of_cell + 1) * sizeof(size_type)));
SP_CALL(spMemorySet(b_end, 0, (num_of_cell + 1) * sizeof(size_type)));
uint sMemSize = sizeof(size_type) * (NUMBER_OF_THREADS_PER_BLOCK + 1);
SP_DEVICE_CALL_KERNEL2(spParticleBucketBuild_kernel,
num_of_particle / NUMBER_OF_THREADS_PER_BLOCK + 1, NUMBER_OF_THREADS_PER_BLOCK, sMemSize,
b_start, b_end, cell_hash, sorted_idx, num_of_particle, num_of_cell);
SP_CALL_DEVICE_KERNEL(_CopyBucketStartCount_kernel,
num_of_cell / NUMBER_OF_THREADS_PER_BLOCK + 1, NUMBER_OF_THREADS_PER_BLOCK,
b_start, b_end, bucket_start, bucket_count, num_of_cell);
SP_CALL(spMemoryCopy(&num_of_particle, b_start, sizeof(size_type)));
SP_CALL(spParticleResize(sp, num_of_particle));
SP_CALL(spMemoryDeviceFree((void **) &b_start));
SP_CALL(spMemoryDeviceFree((void **) &b_end));
return SP_SUCCESS;
}
__global__ void
spParticleCoordinateConvert(particle_head *sp,
Real3 dx, Real3 min,
size_type const *start_pos,
size_type const *end_pos,
size_type const *sorted_index)
{
uint s0 = __umul24(blockIdx.x, gridDim.x) + __umul24(blockIdx.y, gridDim.y) + __umul24(blockIdx.z, gridDim.z);
__shared__ Real x0, y0, z0;
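// One block per mesh cell: thread 0 computes the cell's global lower corner once; the sync below publishes it to the whole block.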
if (threadIdx.x == 0)
{
x0 = blockIdx.x * dx.x + min.x;
y0 = blockIdx.y * dx.y + min.y;
z0 = blockIdx.z * dx.z + min.z;
}
spParallelSyncThreads();
if (start_pos[s0] + threadIdx.x < end_pos[s0])
{
size_type s = sorted_index[start_pos[s0] + threadIdx.x];
sp->rx[s] += x0;
sp->ry[s] += y0;
sp->rz[s] += z0;
}
};
int spParticleCoordinateLocalToGlobal(spParticle *sp)
{
spMesh const *m = spMeshAttributeGetMesh((spMeshAttribute const *) sp);
uint iform = spMeshAttributeGetForm((spMeshAttribute const *) sp);
Real dx[3], xmin[3], xmax[3];
size_type dims[3];
SP_CALL(spMeshGetGlobalDims(m, dims));
SP_CALL(spMeshGetDx(m, dx));
SP_CALL(spMeshGetBox(m, SP_DOMAIN_ALL, xmin, xmax));
void **p_data;
SP_CALL(spParticleGetAllAttributeData_device(sp, &p_data, NULL));
size_type *start_pos, *end_pos, *index;
SP_CALL(spParticleGetBucket(sp, &start_pos, &end_pos, &index, NULL));
uint3 blockDim;
blockDim.x = NUMBER_OF_THREADS_PER_BLOCK;
blockDim.y = 1;
blockDim.z = 1;
SP_CALL_DEVICE_KERNEL(spParticleCoordinateConvert, sizeType2Dim3(dims), blockDim,
(particle_head *) (p_data), real2Real3(dx), real2Real3(xmin),
start_pos, end_pos, index);
return SP_SUCCESS;
};
/* Number of 64-bit vectors per dimension */
#define VECTOR_SIZE 64
#define CURAND_CALL(x) do { if((x) != HIPRAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
/**
 * This kernel initializes one RNG state per thread for each of the num_of_dim dimensions (e.g. x, y, z, vx, vy, vz)
*/
__global__ void
spRandomGeneratorSobolSetupKernel(unsigned long long *sobolDirectionVectors,
unsigned long long *sobolScrambleConstants,
int num_of_dim, size_type offset,
struct curandStateScrambledSobol64 *state)
{
unsigned int id = threadIdx.x + __umul24(blockDim.x, blockIdx.x);
/* Each thread uses 3 different dimensions */
for (int i = 0; i < num_of_dim; ++i)
{
hiprand_init(sobolDirectionVectors + VECTOR_SIZE * (id * num_of_dim + i),
sobolScrambleConstants[id * num_of_dim + i],
offset,
&(state[id * num_of_dim + i]));
}
}
__global__ void
spRandomDistributionUniformKernel(struct curandStateScrambledSobol64 *state, Real *data, size_type num)
{
unsigned int total_thread_id = threadIdx.x + __umul24(blockDim.x, blockIdx.x);
unsigned int total_thread_num = __umul24(blockDim.x, gridDim.x);
struct curandStateScrambledSobol64 local_state = state[total_thread_id];
for (size_type i = total_thread_id; i < num; i += total_thread_num) { data[i] = hiprand_uniform(&local_state); }
state[total_thread_id] = local_state;
}
__global__ void
spRandomDistributionNormalKernel(struct curandStateScrambledSobol64 *state, Real *data, size_type num)
{
unsigned int total_thread_id = threadIdx.x + __umul24(blockDim.x, blockIdx.x);
unsigned int total_thread_num = __umul24(blockDim.x, gridDim.x);
struct curandStateScrambledSobol64 local_state = state[total_thread_id];
for (size_type i = total_thread_id; i < num; i += total_thread_num) { data[i] = hiprand_normal(&local_state); }
state[total_thread_id] = local_state;
}
int spParticleInitialize_device(Real **data, int n_dims, int const *dist_types, size_type num, size_type offset)
{
int error_code = SP_SUCCESS;
struct curandStateScrambledSobol64 *devSobol64States;
unsigned long long int *devDirectionVectors64;
unsigned long long int *devScrambleConstants64;
size_type n_threads = 64 * VECTOR_SIZE;
hiprandDirectionVectors64_t *hostVectors64;
unsigned long long int *hostScrambleConstants64;
/* Get pointers to the 64 bit scrambled direction vectors and constants*/
CURAND_CALL(hiprandGetDirectionVectors64(&hostVectors64,
CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6));
CURAND_CALL(hiprandGetScrambleConstants64(&hostScrambleConstants64));
/* Allocate memory for n_dims states per thread, each state drawing from a unique Sobol dimension */
SP_DEVICE_CALL(hipMalloc((void **) &(devSobol64States), n_threads * n_dims * sizeof(curandStateScrambledSobol64)));
/* Allocate memory and copy n_dims sets of direction vectors per thread to the device */
SP_DEVICE_CALL(hipMalloc((void **) &(devDirectionVectors64),
n_threads * n_dims * VECTOR_SIZE * sizeof(long long int)));
SP_DEVICE_CALL(hipMemcpy(devDirectionVectors64, hostVectors64,
n_threads * n_dims * VECTOR_SIZE * sizeof(long long int),
hipMemcpyHostToDevice));
/* Allocate memory and copy n_dims scramble constants (one constant per dimension)
per thread to the device */
SP_DEVICE_CALL(hipMalloc((void **) &(devScrambleConstants64),
n_threads * n_dims * sizeof(long long int)));
SP_DEVICE_CALL(hipMemcpy(devScrambleConstants64, hostScrambleConstants64,
n_threads * n_dims * sizeof(long long int),
hipMemcpyHostToDevice));
/* Initialize the states */
SP_CALL_DEVICE_KERNEL(spRandomGeneratorSobolSetupKernel,
n_threads / VECTOR_SIZE, VECTOR_SIZE,
devDirectionVectors64, devScrambleConstants64,
n_dims, offset, devSobol64States);
for (int n = 0; n < n_dims; ++n)
{
switch (dist_types[n])
{
case SP_RAND_NORMAL: //
SP_CALL_DEVICE_KERNEL(spRandomDistributionNormalKernel,
(n_threads / VECTOR_SIZE), VECTOR_SIZE,
(devSobol64States + n * n_threads), data[n], num);
break;
case SP_RAND_UNIFORM:
default://
SP_CALL_DEVICE_KERNEL(spRandomDistributionUniformKernel,
(n_threads / VECTOR_SIZE), VECTOR_SIZE,
(devSobol64States + n * n_threads), data[n], num);
break;
}
}
SP_DEVICE_CALL(hipFree((void *) (devSobol64States)));
SP_DEVICE_CALL(hipFree(devDirectionVectors64));
SP_DEVICE_CALL(hipFree(devScrambleConstants64));
return error_code;
}
| aaff420788423352892812e36ad42579aa872bb8.cu | //
// Created by salmon on 16-9-6.
//
extern "C"
{
#include <assert.h>
#include "../../spParallel.h"
#include "../../spMesh.h"
#include "../../spField.h"
#include "../../spParticle.h"
#include "../../spAlogorithm.h"
#include "../spParticle.impl.h"
#include "../sp_device.h"
#include "spParallelCUDA.h"
}
#include </usr/local/cuda/include/curand_kernel.h>
#include "../../spDataType.h"
__global__ void
spParticleBucketInitialize_kernel(dim3 start,
dim3 count,
dim3 strides,
int num_pic,
size_type *start_pos,
size_type *f_count)
{
uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
uint z = __umul24(blockIdx.z, blockDim.z) + threadIdx.z;
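// Assign every cell in the [start, start+count) sub-domain a contiguous slab of num_pic particle slots, all marked occupied.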
if (x < count.x && y < count.y && z < count.z)
{
uint s = __umul24(start.x + x, strides.x) +
__umul24(start.y + y, strides.y) +
__umul24(start.z + z, strides.z);
start_pos[s] = (x * count.y * count.z + y * count.z + z) * num_pic;
f_count[s] = (size_type) num_pic;
}
}
int spParticleBucketInitialize_device(spParticle *sp)
{
spMesh const *m = spMeshAttributeGetMesh((spMeshAttribute const *) sp);
int iform = spMeshAttributeGetForm((spMeshAttribute const *) sp);
size_type num_of_cell = spMeshGetNumberOfEntities(m, SP_DOMAIN_ALL, iform);
size_type num_of_pic = spParticleGetPIC(sp);
size_type *bucket_start, *bucket_count, *sorted_id, *cell_hash;
SP_CALL(spParticleGetBucket(sp, &bucket_start, &bucket_count, &sorted_id, &cell_hash));
SP_CALL(spFillSeq(sorted_id, SP_TYPE_size_type, spParticleCapacity(sp), 0, 1));
SP_CALL(spMemorySet(cell_hash, -1, spParticleCapacity(sp) * sizeof(size_type)));
size_type m_start[3], m_end[3], m_count[3], m_strides[3];
SP_CALL(spMeshGetDomain(m, SP_DOMAIN_CENTER, m_start, m_end, m_count));
SP_CALL(spMeshGetStrides(m, m_strides));
size_type block_dim[3], grid_dim[3];
SP_CALL(spMeshGetDims(m, grid_dim));
SP_CALL(spParallelThreadBlockDecompose(NUMBER_OF_THREADS_PER_BLOCK, grid_dim, block_dim));
SP_CALL_DEVICE_KERNEL(spParticleBucketInitialize_kernel,
sizeType2Dim3(grid_dim), sizeType2Dim3(block_dim),
sizeType2Dim3(m_start), sizeType2Dim3(m_count), sizeType2Dim3(m_strides),
num_of_pic, bucket_start, bucket_count);
return SP_SUCCESS;
}
/**
* copy from cuda example/particle
*/
__global__ void
spParticleBucketBuild_kernel(size_type *cellStart, // output: cell start index
size_type *cellEnd, // output: cell end index
size_type const *gridParticleHash, // input: sorted grid hashes
size_type const *gridParticleIndex,// input: sorted particle indices
size_type numParticles, size_type num_cell)
{
extern __shared__ size_type sharedHash[]; // blockSize + 1 elements
uint index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
size_type hash;
// handle case when no. of particles not multiple of block size
if (index < numParticles)
{
hash = gridParticleHash[index];
// Load hash data into shared memory so that we can look
// at neighboring particle's hash entity without loading
// two hash values per thread
sharedHash[threadIdx.x + 1] = hash;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
sharedHash[0] = gridParticleHash[index - 1];
}
}
spParallelSyncThreads();
if (index < numParticles)
{
// If this particle has a different cell index to the previous
// particle then it must be the first particle in the cell,
// so store the index of this particle in the cell.
// As it isn't the first particle, it must also be the cell end of
// the previous particle's cell
if (index == 0 || hash != sharedHash[threadIdx.x])
{
assert(hash + 1 <= num_cell);
cellStart[hash + 1] = index;
if (index > 0)
{
assert(sharedHash[threadIdx.x] + 1 <= num_cell);
cellEnd[sharedHash[threadIdx.x] + 1] = index;
}
}
if (index == numParticles - 1)
{
assert(hash + 1 <= num_cell);
cellEnd[hash + 1] = index + 1;
}
}
}
__global__ void
_CopyBucketStartCount_kernel(size_type *b_start, // input: cell start index (stored at hash+1)
size_type *b_end, // input: cell end index (stored at hash+1)
size_type *start,
size_type *count,
size_type num_of_cell)
{
size_type index = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (index < num_of_cell)
{
start[index] = b_start[index + 1];
count[index] = b_end[index + 1] - b_start[index + 1];
assert(b_end[index + 1] >= b_start[index + 1]);
}
}
__host__
int spParticleBucketBuild_device(spParticle *sp)
{
assert(spParticleNeedSorting(sp) == SP_FALSE);
spMesh const *m = spMeshAttributeGetMesh((spMeshAttribute const *) sp);
int iform = spMeshAttributeGetForm((spMeshAttribute const *) sp);
size_type num_of_cell = spMeshGetNumberOfEntities(m, SP_DOMAIN_ALL, iform);
size_type num_of_particle = spParticleSize(sp);
size_type *bucket_start, *bucket_count, *sorted_idx, *cell_hash;
SP_CALL(spParticleGetBucket(sp, &bucket_start, &bucket_count, &sorted_idx, &cell_hash));
size_type *b_start, *b_end;
SP_CALL(spMemoryDeviceAlloc((void **) &b_end, (num_of_cell + 1) * sizeof(size_type)));
SP_CALL(spMemoryDeviceAlloc((void **) &b_start, (num_of_cell + 1) * sizeof(size_type)));
SP_CALL(spMemorySet(b_start, 0, (num_of_cell + 1) * sizeof(size_type)));
SP_CALL(spMemorySet(b_end, 0, (num_of_cell + 1) * sizeof(size_type)));
uint sMemSize = sizeof(size_type) * (NUMBER_OF_THREADS_PER_BLOCK + 1);
SP_DEVICE_CALL_KERNEL2(spParticleBucketBuild_kernel,
num_of_particle / NUMBER_OF_THREADS_PER_BLOCK + 1, NUMBER_OF_THREADS_PER_BLOCK, sMemSize,
b_start, b_end, cell_hash, sorted_idx, num_of_particle, num_of_cell);
SP_CALL_DEVICE_KERNEL(_CopyBucketStartCount_kernel,
num_of_cell / NUMBER_OF_THREADS_PER_BLOCK + 1, NUMBER_OF_THREADS_PER_BLOCK,
b_start, b_end, bucket_start, bucket_count, num_of_cell);
SP_CALL(spMemoryCopy(&num_of_particle, b_start, sizeof(size_type)));
SP_CALL(spParticleResize(sp, num_of_particle));
SP_CALL(spMemoryDeviceFree((void **) &b_start));
SP_CALL(spMemoryDeviceFree((void **) &b_end));
return SP_SUCCESS;
}
__global__ void
spParticleCoordinateConvert(particle_head *sp,
Real3 dx, Real3 min,
size_type const *start_pos,
size_type const *end_pos,
size_type const *sorted_index)
{
uint s0 = __umul24(blockIdx.x, gridDim.x) + __umul24(blockIdx.y, gridDim.y) + __umul24(blockIdx.z, gridDim.z);
__shared__ Real x0, y0, z0;
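// One block per mesh cell: thread 0 computes the cell's global lower corner once; the sync below publishes it to the whole block.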
if (threadIdx.x == 0)
{
x0 = blockIdx.x * dx.x + min.x;
y0 = blockIdx.y * dx.y + min.y;
z0 = blockIdx.z * dx.z + min.z;
}
spParallelSyncThreads();
if (start_pos[s0] + threadIdx.x < end_pos[s0])
{
size_type s = sorted_index[start_pos[s0] + threadIdx.x];
sp->rx[s] += x0;
sp->ry[s] += y0;
sp->rz[s] += z0;
}
};
int spParticleCoordinateLocalToGlobal(spParticle *sp)
{
spMesh const *m = spMeshAttributeGetMesh((spMeshAttribute const *) sp);
uint iform = spMeshAttributeGetForm((spMeshAttribute const *) sp);
Real dx[3], xmin[3], xmax[3];
size_type dims[3];
SP_CALL(spMeshGetGlobalDims(m, dims));
SP_CALL(spMeshGetDx(m, dx));
SP_CALL(spMeshGetBox(m, SP_DOMAIN_ALL, xmin, xmax));
void **p_data;
SP_CALL(spParticleGetAllAttributeData_device(sp, &p_data, NULL));
size_type *start_pos, *end_pos, *index;
SP_CALL(spParticleGetBucket(sp, &start_pos, &end_pos, &index, NULL));
uint3 blockDim;
blockDim.x = NUMBER_OF_THREADS_PER_BLOCK;
blockDim.y = 1;
blockDim.z = 1;
SP_CALL_DEVICE_KERNEL(spParticleCoordinateConvert, sizeType2Dim3(dims), blockDim,
(particle_head *) (p_data), real2Real3(dx), real2Real3(xmin),
start_pos, end_pos, index);
return SP_SUCCESS;
};
/* Number of 64-bit vectors per dimension */
#define VECTOR_SIZE 64
#define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
/**
 * This kernel initializes one RNG state per thread for each of the num_of_dim dimensions (e.g. x, y, z, vx, vy, vz)
*/
__global__ void
spRandomGeneratorSobolSetupKernel(unsigned long long *sobolDirectionVectors,
unsigned long long *sobolScrambleConstants,
int num_of_dim, size_type offset,
struct curandStateScrambledSobol64 *state)
{
unsigned int id = threadIdx.x + __umul24(blockDim.x, blockIdx.x);
/* Each thread uses 3 different dimensions */
for (int i = 0; i < num_of_dim; ++i)
{
curand_init(sobolDirectionVectors + VECTOR_SIZE * (id * num_of_dim + i),
sobolScrambleConstants[id * num_of_dim + i],
offset,
&(state[id * num_of_dim + i]));
}
}
__global__ void
spRandomDistributionUniformKernel(struct curandStateScrambledSobol64 *state, Real *data, size_type num)
{
unsigned int total_thread_id = threadIdx.x + __umul24(blockDim.x, blockIdx.x);
unsigned int total_thread_num = __umul24(blockDim.x, gridDim.x);
struct curandStateScrambledSobol64 local_state = state[total_thread_id];
for (size_type i = total_thread_id; i < num; i += total_thread_num) { data[i] = curand_uniform(&local_state); }
state[total_thread_id] = local_state;
}
__global__ void
spRandomDistributionNormalKernel(struct curandStateScrambledSobol64 *state, Real *data, size_type num)
{
unsigned int total_thread_id = threadIdx.x + __umul24(blockDim.x, blockIdx.x);
unsigned int total_thread_num = __umul24(blockDim.x, gridDim.x);
struct curandStateScrambledSobol64 local_state = state[total_thread_id];
for (size_type i = total_thread_id; i < num; i += total_thread_num) { data[i] = curand_normal(&local_state); }
state[total_thread_id] = local_state;
}
int spParticleInitialize_device(Real **data, int n_dims, int const *dist_types, size_type num, size_type offset)
{
int error_code = SP_SUCCESS;
struct curandStateScrambledSobol64 *devSobol64States;
unsigned long long int *devDirectionVectors64;
unsigned long long int *devScrambleConstants64;
size_type n_threads = 64 * VECTOR_SIZE;
curandDirectionVectors64_t *hostVectors64;
unsigned long long int *hostScrambleConstants64;
/* Get pointers to the 64 bit scrambled direction vectors and constants*/
CURAND_CALL(curandGetDirectionVectors64(&hostVectors64,
CURAND_SCRAMBLED_DIRECTION_VECTORS_64_JOEKUO6));
CURAND_CALL(curandGetScrambleConstants64(&hostScrambleConstants64));
/* Allocate memory for n_dims states per thread, each state drawing from a unique Sobol dimension */
SP_DEVICE_CALL(cudaMalloc((void **) &(devSobol64States), n_threads * n_dims * sizeof(curandStateScrambledSobol64)));
/* Allocate memory and copy n_dims sets of direction vectors per thread to the device */
SP_DEVICE_CALL(cudaMalloc((void **) &(devDirectionVectors64),
n_threads * n_dims * VECTOR_SIZE * sizeof(long long int)));
SP_DEVICE_CALL(cudaMemcpy(devDirectionVectors64, hostVectors64,
n_threads * n_dims * VECTOR_SIZE * sizeof(long long int),
cudaMemcpyHostToDevice));
/* Allocate memory and copy n_dims scramble constants (one constant per dimension)
per thread to the device */
SP_DEVICE_CALL(cudaMalloc((void **) &(devScrambleConstants64),
n_threads * n_dims * sizeof(long long int)));
SP_DEVICE_CALL(cudaMemcpy(devScrambleConstants64, hostScrambleConstants64,
n_threads * n_dims * sizeof(long long int),
cudaMemcpyHostToDevice));
/* Initialize the states */
SP_CALL_DEVICE_KERNEL(spRandomGeneratorSobolSetupKernel,
n_threads / VECTOR_SIZE, VECTOR_SIZE,
devDirectionVectors64, devScrambleConstants64,
n_dims, offset, devSobol64States);
for (int n = 0; n < n_dims; ++n)
{
switch (dist_types[n])
{
case SP_RAND_NORMAL: //
SP_CALL_DEVICE_KERNEL(spRandomDistributionNormalKernel,
(n_threads / VECTOR_SIZE), VECTOR_SIZE,
(devSobol64States + n * n_threads), data[n], num);
break;
case SP_RAND_UNIFORM:
default://
SP_CALL_DEVICE_KERNEL(spRandomDistributionUniformKernel,
(n_threads / VECTOR_SIZE), VECTOR_SIZE,
(devSobol64States + n * n_threads), data[n], num);
break;
}
}
SP_DEVICE_CALL(cudaFree((void *) (devSobol64States)));
SP_DEVICE_CALL(cudaFree(devDirectionVectors64));
SP_DEVICE_CALL(cudaFree(devScrambleConstants64));
return error_code;
}
|
81301cd495194aa178b0de6e22a3a6f0d4ec88e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
// includes, kernels
#include "matrixmul_kernel.hip"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
bool CompareMatrices(Matrix A, Matrix B);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
int ReadParamsFile(int* params, char* file_name, int num_params);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
M = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
N = AllocateMatrix(M.width, rand() % 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = (int*)malloc(3 * sizeof(int));
unsigned data_read = ReadParamsFile(params, argv[1], 3);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
unsigned sizeM = ReadFile(&M, argv[2]);
unsigned sizeN = ReadFile(&N, argv[3]);
if( (sizeM != M.height * M.width) || (sizeN != N.height * N.width) )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
printf("CPU computation complete\n");
// check if the device result is equivalent to the expected solution
bool res = CompareMatrices(reference, P);
printf("Test %s\n", res ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
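// Grid dimensions are rounded up with ceil so partial tiles at the edges are still launched when sizes are not multiples of TILE_WIDTH.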
dim3 block, grid;
block.x = TILE_WIDTH;
block.y= TILE_WIDTH;
block.z = 1;
grid.x = ceil(Nd.width/(float)block.x);
std::cout<<"Width of M"<<Md.width<<std::endl;
std::cout<<"Grid width"<<grid.x<<std::endl;
grid.y = ceil(Md.height/(float)block.y);
grid.z = 1;
// Launch the device computation threads!
hipLaunchKernelGGL(( MatrixMulKernel), dim3(grid), dim3(block), 0, 0, Md, Nd, Pd);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
hipFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns the number of elements read, which should
// equal M.height * M.width
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->width * M->height;
FILE* input = fopen(file_name, "r");
for (unsigned i = 0; i < data_read; i++)
fscanf(input, "%f", &(M->elements[i]));
fclose(input);
return data_read;
}
// Read params of input matrices
int ReadParamsFile(int* params, char* file_name, int num_params)
{
FILE* input = fopen(file_name, "r");
for (unsigned i = 0; i < num_params; i++)
fscanf(input, "%d", &(params[i]));
fclose(input);
return num_params;
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
unsigned int size = M.width * M.height;
FILE* output = fopen(file_name, "w");
for (unsigned i = 0; i < size; i++) {
fprintf(output, "%f ", M.elements[i]);
}
fclose(output);
}
// returns true iff A and B have same elements in same order
bool CompareMatrices(Matrix A, Matrix B) {
unsigned int size = A.width * A.height;
if ( (A.width != B.width) || (A.height != B.height) )
return false;
for (unsigned i = 0; i < size; i++)
if (abs(A.elements[i] - B.elements[i]) > 0.0001f)
return false;
return true;
}
| 81301cd495194aa178b0de6e22a3a6f0d4ec88e1.cu | /* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
// includes, kernels
#include "matrixmul_kernel.cu"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void FreeDeviceMatrix(Matrix* M);
void FreeMatrix(Matrix* M);
bool CompareMatrices(Matrix A, Matrix B);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
int ReadParamsFile(int* params, char* file_name, int num_params);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
Matrix M;
Matrix N;
Matrix P;
int errorM = 0, errorN = 0;
srand(52);
if(argc != 5 && argc != 4)
{
// Allocate and initialize the matrices
M = AllocateMatrix(rand() % 1024, rand() % 1024, 1);
N = AllocateMatrix(M.width, rand() % 1024, 1);
P = AllocateMatrix(M.height, N.width, 0);
}
else
{
// Allocate and read in matrices from disk
int* params = (int*)malloc(3 * sizeof(int));
unsigned data_read = ReadParamsFile(params, argv[1], 3);
if(data_read != 3){
printf("Error reading parameter file\n");
return 1;
}
M = AllocateMatrix(params[0], params[1], 0);
N = AllocateMatrix(params[1], params[2], 0);
P = AllocateMatrix(params[0], params[2], 0);
unsigned sizeM = ReadFile(&M, argv[2]);
unsigned sizeN = ReadFile(&N, argv[3]);
if( (sizeM != M.height * M.width) || (sizeN != N.height * N.width) )
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
printf("GPU computation complete\n");
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(P.height, P.width, 0);
computeGold(reference.elements, M.elements, N.elements, M.height, M.width, N.width);
printf("CPU computation complete\n");
// check if the device result is equivalent to the expected solution
bool res = CompareMatrices(reference, P);
printf("Test %s\n", res ? "PASSED" : "FAILED");
if(argc == 5)
{
WriteFile(P, argv[4]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free matrices
FreeMatrix(&M);
FreeMatrix(&N);
FreeMatrix(&P);
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
// Load M and N to the device
Matrix Md = AllocateDeviceMatrix(M);
CopyToDeviceMatrix(Md, M);
Matrix Nd = AllocateDeviceMatrix(N);
CopyToDeviceMatrix(Nd, N);
// Allocate P on the device
Matrix Pd = AllocateDeviceMatrix(P);
CopyToDeviceMatrix(Pd, P); // Clear memory
// Setup the execution configuration
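// Grid dimensions are rounded up with ceil so partial tiles at the edges are still launched when sizes are not multiples of TILE_WIDTH.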
dim3 block, grid;
block.x = TILE_WIDTH;
block.y= TILE_WIDTH;
block.z = 1;
grid.x = ceil(Nd.width/(float)block.x);
std::cout<<"Width of M"<<Md.width<<std::endl;
std::cout<<"Grid width"<<grid.x<<std::endl;
grid.y = ceil(Md.height/(float)block.y);
grid.z = 1;
// Launch the device computation threads!
MatrixMulKernel<<<grid, block>>>(Md, Nd, Pd);
// Read P from the device
CopyFromDeviceMatrix(P, Pd);
// Free device matrices
FreeDeviceMatrix(&Md);
FreeDeviceMatrix(&Nd);
FreeDeviceMatrix(&Pd);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a device matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
// If init == 2, initialize matrix parameters, but do not allocate memory
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
// don't allocate memory on option 2
if(init == 2)
return M;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand()*3 / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Free a device matrix.
void FreeDeviceMatrix(Matrix* M)
{
cudaFree(M->elements);
M->elements = NULL;
}
// Free a host Matrix
void FreeMatrix(Matrix* M)
{
free(M->elements);
M->elements = NULL;
}
// Read a floating point matrix in from file
// Returns the number of elements read, which should
// equal M.height * M.width
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = M->width * M->height;
FILE* input = fopen(file_name, "r");
for (unsigned i = 0; i < data_read; i++)
fscanf(input, "%f", &(M->elements[i]));
fclose(input);
return data_read;
}
// Read params of input matrices
int ReadParamsFile(int* params, char* file_name, int num_params)
{
FILE* input = fopen(file_name, "r");
for (unsigned i = 0; i < num_params; i++)
fscanf(input, "%d", &(params[i]));
fclose(input);
return num_params;
}
// Write a floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
unsigned int size = M.width * M.height;
FILE* output = fopen(file_name, "w");
for (unsigned i = 0; i < size; i++) {
fprintf(output, "%f ", M.elements[i]);
}
fclose(output);
}
// returns true iff A and B have same elements in same order
bool CompareMatrices(Matrix A, Matrix B) {
unsigned int size = A.width * A.height;
if ( (A.width != B.width) || (A.height != B.height) )
return false;
for (unsigned i = 0; i < size; i++)
if (abs(A.elements[i] - B.elements[i]) > 0.0001f)
return false;
return true;
}
|
5ce2a34bd23c010bfacd8bdc2cc2efcacd0a921d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Some basic functions for mtx reading and formating
*
* Author: Petros Anastasiadis([email protected])
*/
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "alloc.h"
#include "input.h"
#include <hip/hip_runtime.h>
#include <cusp/io/matrix_market.h>
int mtx_read1(int ** csrRow, int ** cooCol, double ** cooVal, int * n, int * m, int * n_z, char * name)
{
cusp::csr_matrix<int, double, cusp::host_memory> matrix;
// load a matrix stored in MatrixMarket format
cusp::io::read_matrix_market_file(matrix,name);
/*save the matrix information*/
*n = matrix.num_rows;
*m = matrix.num_cols;
*n_z = matrix.num_entries;
hipMallocManaged(csrRow, (*n+1)*sizeof(int));
hipMallocManaged(cooCol, *n_z*sizeof(int));
hipMallocManaged(cooVal, *n_z*sizeof(double));
hipDeviceSynchronize();
/*copy the elements*/
//numBytes = (*n + 1) * sizeof(int);
for (int i=0; i < (*n+1); i++) (*csrRow)[i] = matrix.row_offsets[i] ;
//hipMemcpy(csrRow, &matrix.row_offsets[0], numBytes,hipMemcpyHostToHost);
//CudaCheckError();
//numBytes = *n_z * sizeof(int);
for (int i=0; i < *n_z; i++) (*cooCol)[i] = matrix.column_indices[i] ;
//hipMemcpy(cooCol, &matrix.column_indices[0], numBytes,hipMemcpyHostToHost);
//CudaCheckError();
//numBytes = *n_z * sizeof(double);
for (int i=0; i < *n_z; i++) (*cooVal)[i] = matrix.values[i] ;
//hipMemcpy(cooVal, &matrix.values[0], numBytes,hipMemcpyHostToHost);
//CudaCheckError();
return 1;
}
int mtx_read(int ** I, int ** cooCol, double ** cooVal, int * n, int * m, int * n_z, char * name)
{
char c;
char *type, *format, *var_type, *symmetry, *string=NULL;
FILE *fp ;
size_t len=0;
if ((fp=fopen(name, "r"))==NULL){
printf("Problem in read pass\n");
exit(1);
}
getline(&string, &len, fp);
strtok(string," ");
type = strtok(NULL," ");
format = strtok(NULL," ");
var_type = strtok(NULL," ");
symmetry = strtok(NULL,"\n");
//printf("type=%s, format=%s, var_type=%s, ", type, format, var_type);
if (strcmp(type,"matrix")){
printf("type=%s unsupported...terminating\n\n\n\n\n\n\n\n\n\n\n\n", type);
exit(1);
}
if (strcmp(format,"coordinate") ){
printf("format=%s unsupported...terminating\n\n\n\n\n\n\n\n\n\n\n\n", format);
exit(1);
}
if (strcmp(var_type,"integer") && strcmp(var_type,"real") && strcmp(var_type,"pattern")){
printf("Var_type=%s unsupported...terminating\n\n\n\n\n\n\n\n\n\n\n\n", var_type);
exit(1);
}
while((c=getc(fp))=='%') while( (c=getc(fp))!='\n') ;
ungetc(c, fp);
int k, lines = 0, sym_k=0;
fscanf(fp,"%d %d %d", n, m, &lines);
//printf("n=%d, m=%d, lines=%d, ", *n, *m, lines);
*n_z = 0;
if (!strcmp(symmetry,"symmetric")){
get_nz_symmetric(n_z, name);
//printf("symmetry=symmetric\n");
}
else if (!strcmp(symmetry,"general")) {
*n_z=lines;
//printf("symmetry=general\n");
}
else {
printf("Invalid symmetry value:%s\n", symmetry);
return 0;
}
//printf("n_z=%d\n", *n_z);
hipMallocManaged(I, *n_z*sizeof(int));
hipMallocManaged(cooCol, *n_z*sizeof(int));
hipMallocManaged(cooVal, *n_z*sizeof(double));
double dum;
if ( !*I || !*cooCol || !*cooVal ) return 0;
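// Symmetric MatrixMarket files store only one triangle; mirror each off-diagonal entry to materialize the full matrix.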
if (!strcmp(symmetry,"symmetric")){
for (k = 0; k < lines; k++) {
if (!strcmp(var_type,"pattern")) {
fscanf(fp,"%d %d", &((*I)[sym_k]), &((*cooCol)[sym_k]));
(*cooVal)[sym_k]= 1.0;
}
else {
fscanf(fp,"%d %d %lf", &((*I)[sym_k]), &((*cooCol)[sym_k]), &dum);
(*cooVal)[sym_k]=(double) dum;
}
(*I)[sym_k]--;
(*cooCol)[sym_k]--;
sym_k++;
if ((*I)[sym_k-1] != (*cooCol)[sym_k-1]) {
(*I)[sym_k] = (*cooCol)[sym_k-1];
(*cooCol)[sym_k] = (*I)[sym_k-1];
(*cooVal)[sym_k] = (*cooVal)[sym_k-1];
sym_k++;
}
}
if (sym_k!=*n_z){
printf("Error in symmetric read: sym_k=%d n_z=%d\n", sym_k, *n_z);
return 0;
}
}
else if (!strcmp(symmetry,"general"))
{
for (k = 0; k < lines; k++){
		if (!strcmp(var_type,"pattern")) {
			fscanf(fp,"%d %d", &((*I)[k]), &((*cooCol)[k]));
			(*cooVal)[k] = 1.0;
		}
		else {
			fscanf(fp,"%d %d %lf", &((*I)[k]), &((*cooCol)[k]), &dum);
			(*cooVal)[k] = dum;
		}
		(*I)[k]--;
		(*cooCol)[k]--;
}
}
quickSort( *I, *cooCol, *cooVal, 0, *n_z-1);
fclose(fp);
return 1;
}
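/*
 * Usage sketch (illustrative only; "matrix.mtx" is a hypothetical file name):
 *
 *   int *I, *cooCol, n, m, n_z;
 *   double *cooVal;
 *   if (mtx_read(&I, &cooCol, &cooVal, &n, &m, &n_z, "matrix.mtx"))
 *       ;  // n_z COO triplets, 0-based and row-sorted, in managed memory
 *
 * The banner parsed above is the standard MatrixMarket header
 * "%%MatrixMarket matrix coordinate real general"; MatrixMarket indices are
 * 1-based, hence the decrements after each fscanf.
 */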
void get_nz_symmetric( int * n_z, char* name)
{
	int c;
FILE *fp ;
if ((fp=fopen(name, "r"))==NULL){
printf("Problem in symmetric read pass\n");
exit(1);
}
while((c=getc(fp))=='%') while( (c=getc(fp))!='\n') ;
ungetc(c, fp);
int k, i, j, n, m, lines;
fscanf(fp,"%d %d %d", &n, &m, &lines);
for (k = 0; k < lines; k++){
fscanf(fp,"%d %d %lf", &i, &j, &x);
(*n_z)++;
if(i!=j) (*n_z)++;
}
}
void csr_transform(float ** A, int n, int m, int n_z, float *csrValA, int *csrRowPtrA, int *csrColIndA)
{
int i,j,k=0;
for (i = 0; i < n; i++){
csrRowPtrA[i]=k;
for (j = 0; j < m; j++){
if (A[i][j]!=0.0){
csrValA[k]=A[i][j];
csrColIndA[k]= j;
k++;
}
}
}
csrRowPtrA[i]=k;
if (k!=n_z) printf("Error at non zeroes: %d\n", k-n_z);
return;
}
void quickSort( int *a, int * b, double * c, int l, int r)
{
int j;
if( l < r )
{ // divide and conquer
j = partition( a, b, c, l, r);
quickSort( a, b, c, l, j-1);
quickSort( a, b, c, j+1, r);
}
}
int partition( int *a, int * b, double * c, int l, int r)
{
int pivot, i, j, t;
double t1;
pivot = a[l];
i = l; j = r+1;
while(1)
{
   do ++i; while( i <= r && a[i] <= pivot );
do --j; while( a[j] > pivot );
if( i >= j ) break;
t = a[i]; a[i] = a[j]; a[j] = t;
t = b[i]; b[i] = b[j]; b[j] = t;
t1 = c[i]; c[i] = c[j]; c[j] = t1;
}
t = a[l]; a[l] = a[j]; a[j] = t;
t = b[l]; b[l] = b[j]; b[j] = t;
t1 = c[l]; c[l] = c[j]; c[j] = t1;
return j;
}
| 5ce2a34bd23c010bfacd8bdc2cc2efcacd0a921d.cu | /*
 * Some basic functions for mtx reading and formatting
*
* Author: Petros Anastasiadis([email protected])
*/
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "alloc.h"
#include "input.h"
#include <cuda_runtime.h>
#include <cusp/io/matrix_market.h>
int mtx_read1(int ** csrRow, int ** cooCol, double ** cooVal, int * n, int * m, int * n_z, char * name)
{
cusp::csr_matrix<int, double, cusp::host_memory> matrix;
// load a matrix stored in MatrixMarket format
cusp::io::read_matrix_market_file(matrix,name);
/*save the matrix information*/
*n = matrix.num_rows;
*m = matrix.num_cols;
*n_z = matrix.num_entries;
cudaMallocManaged(csrRow, (*n+1)*sizeof(int));
cudaMallocManaged(cooCol, *n_z*sizeof(int));
cudaMallocManaged(cooVal, *n_z*sizeof(double));
cudaDeviceSynchronize();
/*copy the elements*/
//numBytes = (*n + 1) * sizeof(int);
for (int i=0; i < (*n+1); i++) (*csrRow)[i] = matrix.row_offsets[i] ;
//cudaMemcpy(csrRow, &matrix.row_offsets[0], numBytes,cudaMemcpyHostToHost);
//CudaCheckError();
//numBytes = *n_z * sizeof(int);
for (int i=0; i < *n_z; i++) (*cooCol)[i] = matrix.column_indices[i] ;
//cudaMemcpy(cooCol, &matrix.column_indices[0], numBytes,cudaMemcpyHostToHost);
//CudaCheckError();
//numBytes = *n_z * sizeof(double);
for (int i=0; i < *n_z; i++) (*cooVal)[i] = matrix.values[i] ;
//cudaMemcpy(cooVal, &matrix.values[0], numBytes,cudaMemcpyHostToHost);
//CudaCheckError();
return 1;
}
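/*
 * Illustration (added note, not from the original source): for the 3x3 matrix
 *   [10  0  0]
 *   [ 0 20 30]
 *   [ 0  0 40]
 * the arrays produced above are
 *   csrRow = {0, 1, 3, 4}  (row i owns entries csrRow[i] .. csrRow[i+1]-1)
 *   cooCol = {0, 1, 2, 2}
 *   cooVal = {10, 20, 30, 40}
 */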
int mtx_read(int ** I, int ** cooCol, double ** cooVal, int * n, int * m, int * n_z, char * name)
{
	int c;
char *type, *format, *var_type, *symmetry, *string=NULL;
FILE *fp ;
size_t len=0;
if ((fp=fopen(name, "r"))==NULL){
printf("Problem in read pass\n");
exit(1);
}
getline(&string, &len, fp);
strtok(string," ");
type = strtok(NULL," ");
format = strtok(NULL," ");
var_type = strtok(NULL," ");
symmetry = strtok(NULL,"\n");
//printf("type=%s, format=%s, var_type=%s, ", type, format, var_type);
	if (strcmp(type,"matrix")){
		printf("type=%s unsupported...terminating\n", type);
		exit(1);
	}
	if (strcmp(format,"coordinate")){
		printf("format=%s unsupported...terminating\n", format);
		exit(1);
	}
	if (strcmp(var_type,"integer") && strcmp(var_type,"real") && strcmp(var_type,"pattern")){
		printf("var_type=%s unsupported...terminating\n", var_type);
		exit(1);
	}
while((c=getc(fp))=='%') while( (c=getc(fp))!='\n') ;
ungetc(c, fp);
int k, lines = 0, sym_k=0;
fscanf(fp,"%d %d %d", n, m, &lines);
//printf("n=%d, m=%d, lines=%d, ", *n, *m, lines);
*n_z = 0;
if (!strcmp(symmetry,"symmetric")){
get_nz_symmetric(n_z, name);
//printf("symmetry=symmetric\n");
}
else if (!strcmp(symmetry,"general")) {
*n_z=lines;
//printf("symmetry=general\n");
}
else {
printf("Invalid symmetry value:%s\n", symmetry);
return 0;
}
//printf("n_z=%d\n", *n_z);
cudaMallocManaged(I, *n_z*sizeof(int));
cudaMallocManaged(cooCol, *n_z*sizeof(int));
cudaMallocManaged(cooVal, *n_z*sizeof(double));
double dum;
if ( !*I || !*cooCol || !*cooVal ) return 0;
if (!strcmp(symmetry,"symmetric")){
for (k = 0; k < lines; k++) {
if (!strcmp(var_type,"pattern")) {
fscanf(fp,"%d %d", &((*I)[sym_k]), &((*cooCol)[sym_k]));
(*cooVal)[sym_k]= 1.0;
}
else {
fscanf(fp,"%d %d %lf", &((*I)[sym_k]), &((*cooCol)[sym_k]), &dum);
(*cooVal)[sym_k]=(double) dum;
}
(*I)[sym_k]--;
(*cooCol)[sym_k]--;
sym_k++;
if ((*I)[sym_k-1] != (*cooCol)[sym_k-1]) {
(*I)[sym_k] = (*cooCol)[sym_k-1];
(*cooCol)[sym_k] = (*I)[sym_k-1];
(*cooVal)[sym_k] = (*cooVal)[sym_k-1];
sym_k++;
}
}
if (sym_k!=*n_z){
printf("Error in symmetric read: sym_k=%d n_z=%d\n", sym_k, *n_z);
return 0;
}
}
else if (!strcmp(symmetry,"general"))
{
for (k = 0; k < lines; k++){
		if (!strcmp(var_type,"pattern")) {
			fscanf(fp,"%d %d", &((*I)[k]), &((*cooCol)[k]));
			(*cooVal)[k] = 1.0;
		}
		else {
			fscanf(fp,"%d %d %lf", &((*I)[k]), &((*cooCol)[k]), &dum);
			(*cooVal)[k] = dum;
		}
		(*I)[k]--;
		(*cooCol)[k]--;
}
}
quickSort( *I, *cooCol, *cooVal, 0, *n_z-1);
fclose(fp);
return 1;
}
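/*
 * Usage sketch (illustrative only; "matrix.mtx" is a hypothetical file name):
 *
 *   int *I, *cooCol, n, m, n_z;
 *   double *cooVal;
 *   if (mtx_read(&I, &cooCol, &cooVal, &n, &m, &n_z, "matrix.mtx"))
 *       ;  // n_z COO triplets, 0-based and row-sorted, in managed memory
 *
 * The banner parsed above is the standard MatrixMarket header
 * "%%MatrixMarket matrix coordinate real general"; MatrixMarket indices are
 * 1-based, hence the decrements after each fscanf.
 */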
void get_nz_symmetric( int * n_z, char* name)
{
	int c;
FILE *fp ;
if ((fp=fopen(name, "r"))==NULL){
printf("Problem in symmetric read pass\n");
exit(1);
}
while((c=getc(fp))=='%') while( (c=getc(fp))!='\n') ;
ungetc(c, fp);
int k, i, j, n, m, lines;
fscanf(fp,"%d %d %d", &n, &m, &lines);
for (k = 0; k < lines; k++){
fscanf(fp,"%d %d %lf", &i, &j, &x);
(*n_z)++;
if(i!=j) (*n_z)++;
}
}
void csr_transform(float ** A, int n, int m, int n_z, float *csrValA, int *csrRowPtrA, int *csrColIndA)
{
int i,j,k=0;
for (i = 0; i < n; i++){
csrRowPtrA[i]=k;
for (j = 0; j < m; j++){
if (A[i][j]!=0.0){
csrValA[k]=A[i][j];
csrColIndA[k]= j;
k++;
}
}
}
csrRowPtrA[i]=k;
if (k!=n_z) printf("Error at non zeroes: %d\n", k-n_z);
return;
}
void quickSort( int *a, int * b, double * c, int l, int r)
{
int j;
if( l < r )
{ // divide and conquer
j = partition( a, b, c, l, r);
quickSort( a, b, c, l, j-1);
quickSort( a, b, c, j+1, r);
}
}
int partition( int *a, int * b, double * c, int l, int r)
{
int pivot, i, j, t;
double t1;
pivot = a[l];
i = l; j = r+1;
while(1)
{
   do ++i; while( i <= r && a[i] <= pivot );
do --j; while( a[j] > pivot );
if( i >= j ) break;
t = a[i]; a[i] = a[j]; a[j] = t;
t = b[i]; b[i] = b[j]; b[j] = t;
t1 = c[i]; c[i] = c[j]; c[j] = t1;
}
t = a[l]; a[l] = a[j]; a[j] = t;
t = b[l]; b[l] = b[j]; b[j] = t;
t1 = c[l]; c[l] = c[j]; c[j] = t1;
return j;
}
|
5ae5c3120dbb0fa250ab74f14111b62f31a1ff99.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Naive Example of Matrix Addition
*
*/
/**
* Matrix multiplication: C = A + B.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/* SMC change: One line added */
#include "smc.h"
/**
* Matrix addition (CUDA Kernel) on the device: C = A + B
* w is matrix width, h is matrix height
*/
__global__ void
matrixAddCUDA(float *C, float *A, float *B, int w, int h,
dim3 __SMC_orgGridDim, int __SMC_workersNeeded, int *__SMC_workerCount, int * __SMC_newChunkSeq, int * __SMC_seqEnds) /* SMC change: six extra parameters added to the call at the end */
{
__SMC_Begin /* SMC change: a line added */
/* SMC change: replacing the usage of blockIdx.x and blockIdx.y with references to __SMC_chunkID */
// Block index
//int bx = blockIdx.x;
//int by = blockIdx.y;
int bx = (int)fmodf((float)__SMC_chunkID, (float)__SMC_orgGridDim.x);
int by = (int)(__SMC_chunkID/__SMC_orgGridDim.x);
// Thread local index
int txl = threadIdx.x;
int tyl = threadIdx.y;
// Thread global index
int tx = txl+bx*blockDim.x;
int ty = tyl+by*blockDim.y;
int glbIdx = ty*w+tx;
int maxidx = w*h-1;
if (glbIdx<0 || glbIdx>maxidx){
printf("Error: glbIdx is %d.\n", glbIdx);
}
else{
// Do addition
C[glbIdx] = A[glbIdx] + B[glbIdx];
}
__SMC_End /* SMC change: a line added */
}
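/*
 * Illustration (added note, not from the original source): chunk IDs map to
 * the original grid row-major. With __SMC_orgGridDim = (32, 32), chunk 70
 * gives bx = 70 mod 32 = 6 and by = 70 / 32 = 2, i.e. this thread block does
 * the work block (6, 2) would have done in the untransformed kernel.
 */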
void constantInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = (float)rand()/RAND_MAX;
}
}
int matrixAdd_gold(float *A, float *B, float*C, int size){
for (int i=0;i<size;i++)
C[i] = A[i] + B[i];
return 0;
}
/**
* A wrapper that calls the GPU kernel
*/
int matrixAdd(int block_size, int w, int h)
{
// Allocate host memory for matrices A and B
unsigned int sz = w*h;
unsigned int mem_size = sizeof(float) * sz;
float *h_A = (float *)malloc(mem_size);
float *h_B = (float *)malloc(mem_size);
float *h_C = (float *) malloc(mem_size);
// Initialize host memory
constantInit(h_A, sz);
constantInit(h_B, sz);
// Allocate device memory
float *d_A, *d_B, *d_C;
hipError_t error;
error = hipMalloc((void **) &d_A, mem_size);
error = hipMalloc((void **) &d_B, mem_size);
error = hipMalloc((void **) &d_C, mem_size);
// copy host memory to device
error = hipMemcpy(d_A, h_A, mem_size, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = hipMemcpy(d_B, h_B, mem_size, hipMemcpyHostToDevice);
if (error != hipSuccess)
{
printf("hipMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
/* SMC change: here, the call to grid() was removed, instead, this new variable is created based on the parameters in that original call */
dim3 __SMC_orgGridDim(w / threads.x, h / threads.y);
printf("Computing result using CUDA Kernel...\n");
/* SMC change: a line added */
__SMC_init();
/* SMC change: added four parameters to the call at the end */
hipLaunchKernelGGL(( matrixAddCUDA), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, w, h, __SMC_orgGridDim, __SMC_workersNeeded, __SMC_workerCount, __SMC_newChunkSeq, __SMC_seqEnds); /* SMC change:five extra parameters are used at the end*/
printf("done\n");
hipDeviceSynchronize();
    // Allocate CUDA events intended for timing (created here but never recorded in this example)
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy result from device to host
error = hipMemcpy(h_C, d_C, mem_size, hipMemcpyDeviceToHost);
if (error != hipSuccess)
{
printf("hipMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
/* check the result correctness */
float g_sum = 0, c_sum=0;
for (int i=0;i<w*h;i++) g_sum += h_C[i];
matrixAdd_gold(h_A, h_B, h_C, w*h);
for (int i=0;i<w*h;i++) c_sum += h_C[i];
    if (fabsf(g_sum - c_sum)<1e-10){
printf("Pass...\n");
}
else{
printf("Fail: %f vs. %f.\n", g_sum, c_sum);
}
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return 0;
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Addition Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf(" -w=Width -h=Height (Width x Height of Matrix)\n");
printf(" Note: w and h should be multiples of 32, and neither shall exceed 1024.\n");
exit(EXIT_SUCCESS);
}
int block_size = 32;
int w=1024;
int h=1024;
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "w"))
{
w = getCmdLineArgumentInt(argc, (const char **)argv, "w");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "h"))
{
h = getCmdLineArgumentInt(argc, (const char **)argv, "h");
}
if (w>1024 || h>1024 || fmod(w,32) || fmod(h,32))
{
printf("Error: w and h should be multiples of 32, and neither shall exceed 1024.\n");
exit(EXIT_FAILURE);
}
printf("block_size=%d, matrix width=%d, matrix height=%d\n", block_size, w,h);
int matrix_result = matrixAdd(block_size, w, h);
exit(matrix_result);
}
| 5ae5c3120dbb0fa250ab74f14111b62f31a1ff99.cu | /**
* Naive Example of Matrix Addition
*
*/
/**
* Matrix multiplication: C = A + B.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
/* SMC change: One line added */
#include "smc.h"
/**
* Matrix addition (CUDA Kernel) on the device: C = A + B
* w is matrix width, h is matrix height
*/
__global__ void
matrixAddCUDA(float *C, float *A, float *B, int w, int h,
dim3 __SMC_orgGridDim, int __SMC_workersNeeded, int *__SMC_workerCount, int * __SMC_newChunkSeq, int * __SMC_seqEnds) /* SMC change: six extra parameters added to the call at the end */
{
__SMC_Begin /* SMC change: a line added */
/* SMC change: replacing the usage of blockIdx.x and blockIdx.y with references to __SMC_chunkID */
// Block index
//int bx = blockIdx.x;
//int by = blockIdx.y;
int bx = (int)fmodf((float)__SMC_chunkID, (float)__SMC_orgGridDim.x);
int by = (int)(__SMC_chunkID/__SMC_orgGridDim.x);
// Thread local index
int txl = threadIdx.x;
int tyl = threadIdx.y;
// Thread global index
int tx = txl+bx*blockDim.x;
int ty = tyl+by*blockDim.y;
int glbIdx = ty*w+tx;
int maxidx = w*h-1;
if (glbIdx<0 || glbIdx>maxidx){
printf("Error: glbIdx is %d.\n", glbIdx);
}
else{
// Do addition
C[glbIdx] = A[glbIdx] + B[glbIdx];
}
__SMC_End /* SMC change: a line added */
}
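/*
 * Illustration (added note, not from the original source): chunk IDs map to
 * the original grid row-major. With __SMC_orgGridDim = (32, 32), chunk 70
 * gives bx = 70 mod 32 = 6 and by = 70 / 32 = 2, i.e. this thread block does
 * the work block (6, 2) would have done in the untransformed kernel.
 */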
void constantInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
{
data[i] = (float)rand()/RAND_MAX;
}
}
int matrixAdd_gold(float *A, float *B, float*C, int size){
for (int i=0;i<size;i++)
C[i] = A[i] + B[i];
return 0;
}
/**
* A wrapper that calls the GPU kernel
*/
int matrixAdd(int block_size, int w, int h)
{
// Allocate host memory for matrices A and B
unsigned int sz = w*h;
unsigned int mem_size = sizeof(float) * sz;
float *h_A = (float *)malloc(mem_size);
float *h_B = (float *)malloc(mem_size);
float *h_C = (float *) malloc(mem_size);
// Initialize host memory
constantInit(h_A, sz);
constantInit(h_B, sz);
// Allocate device memory
float *d_A, *d_B, *d_C;
cudaError_t error;
error = cudaMalloc((void **) &d_A, mem_size);
error = cudaMalloc((void **) &d_B, mem_size);
error = cudaMalloc((void **) &d_C, mem_size);
// copy host memory to device
error = cudaMemcpy(d_A, h_A, mem_size, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_A,h_A) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
error = cudaMemcpy(d_B, h_B, mem_size, cudaMemcpyHostToDevice);
if (error != cudaSuccess)
{
printf("cudaMemcpy (d_B,h_B) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
// Setup execution parameters
dim3 threads(block_size, block_size);
/* SMC change: here, the call to grid() was removed, instead, this new variable is created based on the parameters in that original call */
dim3 __SMC_orgGridDim(w / threads.x, h / threads.y);
printf("Computing result using CUDA Kernel...\n");
/* SMC change: a line added */
__SMC_init();
/* SMC change: added four parameters to the call at the end */
matrixAddCUDA<<< grid, threads >>>(d_C, d_A, d_B, w, h, __SMC_orgGridDim, __SMC_workersNeeded, __SMC_workerCount, __SMC_newChunkSeq, __SMC_seqEnds); /* SMC change:five extra parameters are used at the end*/
printf("done\n");
cudaDeviceSynchronize();
    // Allocate CUDA events intended for timing (created here but never recorded in this example)
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy result from device to host
error = cudaMemcpy(h_C, d_C, mem_size, cudaMemcpyDeviceToHost);
if (error != cudaSuccess)
{
printf("cudaMemcpy (h_C,d_C) returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
exit(EXIT_FAILURE);
}
/* check the result correctness */
float g_sum = 0, c_sum=0;
for (int i=0;i<w*h;i++) g_sum += h_C[i];
matrixAdd_gold(h_A, h_B, h_C, w*h);
for (int i=0;i<w*h;i++) c_sum += h_C[i];
    if (fabsf(g_sum - c_sum)<1e-10){
printf("Pass...\n");
}
else{
printf("Fail: %f vs. %f.\n", g_sum, c_sum);
}
// Clean up memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
/**
* Program main
*/
int main(int argc, char **argv)
{
printf("[Matrix Addition Using CUDA] - Starting...\n");
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?"))
{
printf(" -w=Width -h=Height (Width x Height of Matrix)\n");
printf(" Note: w and h should be multiples of 32, and neither shall exceed 1024.\n");
exit(EXIT_SUCCESS);
}
int block_size = 32;
int w=1024;
int h=1024;
// width of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "w"))
{
w = getCmdLineArgumentInt(argc, (const char **)argv, "w");
}
// height of Matrix A
if (checkCmdLineFlag(argc, (const char **)argv, "h"))
{
h = getCmdLineArgumentInt(argc, (const char **)argv, "h");
}
if (w>1024 || h>1024 || fmod(w,32) || fmod(h,32))
{
printf("Error: w and h should be multiples of 32, and neither shall exceed 1024.\n");
exit(EXIT_FAILURE);
}
printf("block_size=%d, matrix width=%d, matrix height=%d\n", block_size, w,h);
int matrix_result = matrixAdd(block_size, w, h);
exit(matrix_result);
}
|
dabb741cb1769379a3adcc26e8ff505c74d0e172.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_device_functions.cuh"
#include "paddle/utils/Logging.h"
__global__ void KeMaxSequenceForward(real *input,
const int *sequence,
real* output,
int *index,
int numSequences,
int dim) {
int dimIdx = threadIdx.x;
int sequenceId = blockIdx.x;
if (sequenceId >= numSequences) return;
int start = sequence[sequenceId];
int end = sequence[sequenceId+1];
for (int i = dimIdx; i < dim; i += blockDim.x) {
real tmp = -HL_FLOAT_MAX;
int tmpId = -1;
for (int insId = start; insId < end; insId++) {
if (tmp < input[insId*dim + i]) {
tmp = input[insId*dim + i];
tmpId = insId;
}
}
output[sequenceId*dim + i] = tmp;
index[sequenceId*dim + i] = tmpId;
}
}
void hl_max_sequence_forward(real* input,
const int* sequence,
real* output,
int *index,
int numSequences,
int dim) {
CHECK_NOTNULL(input);
CHECK_NOTNULL(sequence);
CHECK_NOTNULL(output);
CHECK_NOTNULL(index);
dim3 threads(256, 1);
dim3 grid(numSequences, 1);
hipLaunchKernelGGL(( KeMaxSequenceForward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
input, sequence, output, index, numSequences, dim);
CHECK_SYNC("hl_max_sequence_forward failed");
}
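/*
 * Illustration (added note, not from the original source): `sequence` holds
 * start offsets plus a trailing end marker. For two sequences of lengths 3
 * and 2, sequence = {0, 3, 5}; the kernel reduces input rows [0,3) and [3,5)
 * per column, writing each max to `output` and its row index to `index`.
 */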
__global__ void KeMaxSequenceBackward(real *outputGrad,
int *index,
real* inputGrad,
int numSequences,
int dim) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int colIdx = idx % dim;
if (idx < numSequences*dim) {
int insId = index[idx];
inputGrad[insId * dim + colIdx] += outputGrad[idx];
}
}
void hl_max_sequence_backward(real* outputGrad,
int *index,
real* inputGrad,
int numSequences,
int dim) {
CHECK_NOTNULL(outputGrad);
CHECK_NOTNULL(index);
CHECK_NOTNULL(inputGrad);
unsigned int blocks = (numSequences * dim + 128 - 1) / 128;
dim3 threads(128, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KeMaxSequenceBackward), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
outputGrad, index, inputGrad, numSequences, dim);
CHECK_SYNC("hl_max_sequence_backward failed");
}
template<int blockDimX, int blockDimY, int gridDimX, bool AddRow>
__global__ void KeMatrixAddRows(real* output,
real* table,
int* ids,
int numSamples,
int tableSize,
int dim) {
int idx = threadIdx.x;
int idy = threadIdx.y;
int sampleId = blockIdx.x + idy * gridDimX;
while (sampleId < numSamples) {
int tableId = ids[sampleId];
if ((0 <= tableId) && (tableId < tableSize)) {
real *outputData = output + sampleId * dim;
real *tableData = table + tableId * dim;
for (int i = idx; i < dim; i += blockDimX) {
if (AddRow == 0) {
outputData[i] += tableData[i];
} else {
paddle::paddleAtomicAdd(&tableData[i], outputData[i]);
}
}
}
sampleId += blockDimY*gridDimX;
}
}
template<int blockDimX, int blockDimY, int gridDimX, bool seq2batch, bool isAdd>
__global__
void KeSequence2Batch(real *batch,
real *sequence,
const int *batchIndex,
int seqWidth,
int batchCount) {
int idx = threadIdx.x;
int idy = threadIdx.y;
int id = blockIdx.x + idy * gridDimX;
while (id < batchCount) {
int seqId = batchIndex[id];
real* batchData = batch + id*seqWidth;
real* seqData = sequence + seqId*seqWidth;
for (int i = idx; i < seqWidth; i += blockDimX) {
if (seq2batch) {
if (isAdd) {
batchData[i] += seqData[i];
} else {
batchData[i] = seqData[i];
}
} else {
if (isAdd) {
seqData[i] += batchData[i];
} else {
seqData[i] = batchData[i];
}
}
}
id += blockDimY*gridDimX;
}
}
void hl_sequence2batch_copy(real *batch,
real *sequence,
const int *batchIndex,
int seqWidth,
int batchCount,
bool seq2batch) {
CHECK_NOTNULL(sequence);
CHECK_NOTNULL(batch);
CHECK_NOTNULL(batchIndex);
dim3 threads(128, 8);
dim3 grid(8, 1);
if (seq2batch) {
hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 1, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
batch, sequence, batchIndex, seqWidth, batchCount);
} else {
hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 0, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
batch, sequence, batchIndex, seqWidth, batchCount);
}
CHECK_SYNC("hl_sequence2batch_copy failed");
}
void hl_sequence2batch_add(real *batch,
real *sequence,
int *batchIndex,
int seqWidth,
int batchCount,
bool seq2batch) {
CHECK_NOTNULL(sequence);
CHECK_NOTNULL(batch);
CHECK_NOTNULL(batchIndex);
dim3 threads(128, 8);
dim3 grid(8, 1);
if (seq2batch) {
hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 1, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
batch, sequence, batchIndex, seqWidth, batchCount);
} else {
hipLaunchKernelGGL(( KeSequence2Batch<128, 8, 8, 0, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
batch, sequence, batchIndex, seqWidth, batchCount);
}
CHECK_SYNC("hl_sequence2batch_add failed");
}
template<bool normByTimes, bool seq2batch>
__global__
void KeSequence2BatchPadding(real* batch,
real* sequence,
const int* sequenceStartPositions,
const size_t sequenceWidth,
const size_t maxSequenceLength,
const size_t numSequences) {
int batchIdx = blockIdx.y;
int sequenceStart = sequenceStartPositions[batchIdx];
int sequenceLength = sequenceStartPositions[batchIdx + 1] - sequenceStart;
int sequenceIdx = blockIdx.x * blockDim.y + threadIdx.y;
int batchBaseIdx = (sequenceIdx * numSequences + batchIdx) * sequenceWidth;
int sequenceBaseIdx = (sequenceStart + sequenceIdx) * sequenceWidth;
real scale = normByTimes ? (1.0f / (real)sequenceLength) : 1.0f;
if (sequenceIdx < sequenceLength) {
if (seq2batch) {
/* sequence -> batch */
for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
batch[batchBaseIdx + i] = scale * sequence[sequenceBaseIdx + i];
}
} else {
/* batch -> sequence */
for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
sequence[sequenceBaseIdx + i] = scale * batch[batchBaseIdx + i];
}
}
} else if (sequenceIdx < maxSequenceLength) {
if (seq2batch) {
/* sequence -> batch */
for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
batch[batchBaseIdx + i] = 0;
}
}
}
}
void hl_sequence2batch_copy_padding(real* batch,
real* sequence,
const int* sequenceStartPositions,
const size_t sequenceWidth,
const size_t maxSequenceLength,
const size_t numSequences,
bool normByTimes,
bool seq2batch) {
CHECK_NOTNULL(batch);
CHECK_NOTNULL(sequence);
CHECK_NOTNULL(sequenceStartPositions);
if (!normByTimes && numSequences == 1) {
size_t elementCount = maxSequenceLength * sequenceWidth;
if (seq2batch) {
/* sequence -> batch */
hl_memcpy_device2device(batch, sequence, sizeof(real) * elementCount);
} else {
/* batch -> sequence */
hl_memcpy_device2device(sequence, batch, sizeof(real) * elementCount);
}
return;
}
const int CUDA_BLOCK_SIZE = 512;
/* At least use 32 threads to copy sequenceWidth elements,
and at least 8 elements for each thread. */
int blockDimX = ((((sequenceWidth + 7) >> 3) + 31) >> 5) << 5;
blockDimX = (blockDimX < CUDA_BLOCK_SIZE) ? blockDimX : CUDA_BLOCK_SIZE;
int blockDimY = CUDA_BLOCK_SIZE / blockDimX;
dim3 threads(blockDimX, blockDimY);
int gridDimX = (maxSequenceLength * blockDimX + CUDA_BLOCK_SIZE - 1) /
CUDA_BLOCK_SIZE;
int gridDimY = numSequences;
dim3 grid(gridDimX, gridDimY);
if (seq2batch) {
/* sequence -> batch */
if (normByTimes) {
hipLaunchKernelGGL(( KeSequence2BatchPadding<1, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
batch, sequence, sequenceStartPositions,
sequenceWidth, maxSequenceLength, numSequences);
} else {
hipLaunchKernelGGL(( KeSequence2BatchPadding<0, 1>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
batch, sequence, sequenceStartPositions,
sequenceWidth, maxSequenceLength, numSequences);
}
} else {
/* batch -> sequence */
if (normByTimes) {
hipLaunchKernelGGL(( KeSequence2BatchPadding<1, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
batch, sequence, sequenceStartPositions,
sequenceWidth, maxSequenceLength, numSequences);
} else {
hipLaunchKernelGGL(( KeSequence2BatchPadding<0, 0>), dim3(grid), dim3(threads), 0, STREAM_DEFAULT ,
batch, sequence, sequenceStartPositions,
sequenceWidth, maxSequenceLength, numSequences);
}
}
CHECK_SYNC("hl_sequence2batch_copy_padding failed");
}
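/*
 * Worked example of the block-shape arithmetic above (added note): for
 * sequenceWidth = 100, ((100 + 7) >> 3) = 13 threads would give each thread
 * about 8 elements; rounding up to a warp multiple, ((13 + 31) >> 5) << 5 = 32,
 * so blockDimX = 32 and blockDimY = 512 / 32 = 16 sequence rows per block.
 */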
__device__ inline float my_rsqrt(float x) {
return rsqrtf(x);
}
__device__ inline double my_rsqrt(double x) {
return rsqrt(x);
}
__global__ void KeSequenceAvgForward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int row = gid / width;
int col = gid % width;
if (gid < height * width) {
int start = starts[row];
int end = starts[row + 1];
int seqLength = end - start;
if (seqLength == 0) return;
real sum = 0.0;
for (int i = start; i < end; i++) {
sum += src[i * width + col];
}
sum = mode == 1 ? sum :
(mode == 0 ? sum / seqLength : sum * my_rsqrt((real)seqLength));
dst[gid] += sum;
}
}
void hl_sequence_avg_forward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
CHECK_NOTNULL(dst);
CHECK_NOTNULL(src);
CHECK_NOTNULL(starts);
int block = 512;
int grid = DIVUP(width * height, 512);
CHECK(mode == 0 || mode == 1 || mode == 2)
<< "mode error in hl_sequence_avg_forward!";
hipLaunchKernelGGL(( KeSequenceAvgForward), dim3(grid), dim3(block), 0, STREAM_DEFAULT ,
dst, src, starts, height, width, mode);
CHECK_SYNC("hl_sequence_avg_forward failed");
}
__global__ void KeSequenceAvgBackward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int row = gid / width;
int col = gid % width;
if (gid < height * width) {
int start = starts[row];
int end = starts[row + 1];
int seqLength = end - start;
if (seqLength == 0) return;
real grad = src[gid];
grad = mode == 1 ? grad :
(mode == 0 ? grad / seqLength : grad * my_rsqrt((real)seqLength));
for (int i = start; i < end; i++) {
dst[i * width + col] += grad;
}
}
}
void hl_sequence_avg_backward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
CHECK_NOTNULL(dst);
CHECK_NOTNULL(src);
CHECK_NOTNULL(starts);
int block = 512;
int grid = DIVUP(width * height, 512);
CHECK(mode == 0 || mode == 1 || mode == 2)
<< "mode error in hl_sequence_avg_backward!";
hipLaunchKernelGGL(( KeSequenceAvgBackward), dim3(grid), dim3(block), 0, STREAM_DEFAULT ,
dst, src, starts, height, width, mode);
CHECK_SYNC("hl_sequence_avg_backward failed");
}
| dabb741cb1769379a3adcc26e8ff505c74d0e172.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "hl_base.h"
#include "hl_device_functions.cuh"
#include "paddle/utils/Logging.h"
__global__ void KeMaxSequenceForward(real *input,
const int *sequence,
real* output,
int *index,
int numSequences,
int dim) {
int dimIdx = threadIdx.x;
int sequenceId = blockIdx.x;
if (sequenceId >= numSequences) return;
int start = sequence[sequenceId];
int end = sequence[sequenceId+1];
for (int i = dimIdx; i < dim; i += blockDim.x) {
real tmp = -HL_FLOAT_MAX;
int tmpId = -1;
for (int insId = start; insId < end; insId++) {
if (tmp < input[insId*dim + i]) {
tmp = input[insId*dim + i];
tmpId = insId;
}
}
output[sequenceId*dim + i] = tmp;
index[sequenceId*dim + i] = tmpId;
}
}
void hl_max_sequence_forward(real* input,
const int* sequence,
real* output,
int *index,
int numSequences,
int dim) {
CHECK_NOTNULL(input);
CHECK_NOTNULL(sequence);
CHECK_NOTNULL(output);
CHECK_NOTNULL(index);
dim3 threads(256, 1);
dim3 grid(numSequences, 1);
KeMaxSequenceForward<<< grid, threads, 0, STREAM_DEFAULT >>>
(input, sequence, output, index, numSequences, dim);
CHECK_SYNC("hl_max_sequence_forward failed");
}
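/*
 * Illustration (added note, not from the original source): `sequence` holds
 * start offsets plus a trailing end marker. For two sequences of lengths 3
 * and 2, sequence = {0, 3, 5}; the kernel reduces input rows [0,3) and [3,5)
 * per column, writing each max to `output` and its row index to `index`.
 */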
__global__ void KeMaxSequenceBackward(real *outputGrad,
int *index,
real* inputGrad,
int numSequences,
int dim) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int colIdx = idx % dim;
if (idx < numSequences*dim) {
int insId = index[idx];
inputGrad[insId * dim + colIdx] += outputGrad[idx];
}
}
void hl_max_sequence_backward(real* outputGrad,
int *index,
real* inputGrad,
int numSequences,
int dim) {
CHECK_NOTNULL(outputGrad);
CHECK_NOTNULL(index);
CHECK_NOTNULL(inputGrad);
unsigned int blocks = (numSequences * dim + 128 - 1) / 128;
dim3 threads(128, 1);
dim3 grid(blocks, 1);
KeMaxSequenceBackward<<< grid, threads, 0, STREAM_DEFAULT >>>
(outputGrad, index, inputGrad, numSequences, dim);
CHECK_SYNC("hl_max_sequence_backward failed");
}
template<int blockDimX, int blockDimY, int gridDimX, bool AddRow>
__global__ void KeMatrixAddRows(real* output,
real* table,
int* ids,
int numSamples,
int tableSize,
int dim) {
int idx = threadIdx.x;
int idy = threadIdx.y;
int sampleId = blockIdx.x + idy * gridDimX;
while (sampleId < numSamples) {
int tableId = ids[sampleId];
if ((0 <= tableId) && (tableId < tableSize)) {
real *outputData = output + sampleId * dim;
real *tableData = table + tableId * dim;
for (int i = idx; i < dim; i += blockDimX) {
if (AddRow == 0) {
outputData[i] += tableData[i];
} else {
paddle::paddleAtomicAdd(&tableData[i], outputData[i]);
}
}
}
sampleId += blockDimY*gridDimX;
}
}
template<int blockDimX, int blockDimY, int gridDimX, bool seq2batch, bool isAdd>
__global__
void KeSequence2Batch(real *batch,
real *sequence,
const int *batchIndex,
int seqWidth,
int batchCount) {
int idx = threadIdx.x;
int idy = threadIdx.y;
int id = blockIdx.x + idy * gridDimX;
while (id < batchCount) {
int seqId = batchIndex[id];
real* batchData = batch + id*seqWidth;
real* seqData = sequence + seqId*seqWidth;
for (int i = idx; i < seqWidth; i += blockDimX) {
if (seq2batch) {
if (isAdd) {
batchData[i] += seqData[i];
} else {
batchData[i] = seqData[i];
}
} else {
if (isAdd) {
seqData[i] += batchData[i];
} else {
seqData[i] = batchData[i];
}
}
}
id += blockDimY*gridDimX;
}
}
void hl_sequence2batch_copy(real *batch,
real *sequence,
const int *batchIndex,
int seqWidth,
int batchCount,
bool seq2batch) {
CHECK_NOTNULL(sequence);
CHECK_NOTNULL(batch);
CHECK_NOTNULL(batchIndex);
dim3 threads(128, 8);
dim3 grid(8, 1);
if (seq2batch) {
KeSequence2Batch<128, 8, 8, 1, 0><<< grid, threads, 0, STREAM_DEFAULT >>>
(batch, sequence, batchIndex, seqWidth, batchCount);
} else {
KeSequence2Batch<128, 8, 8, 0, 0><<< grid, threads, 0, STREAM_DEFAULT >>>
(batch, sequence, batchIndex, seqWidth, batchCount);
}
CHECK_SYNC("hl_sequence2batch_copy failed");
}
void hl_sequence2batch_add(real *batch,
real *sequence,
int *batchIndex,
int seqWidth,
int batchCount,
bool seq2batch) {
CHECK_NOTNULL(sequence);
CHECK_NOTNULL(batch);
CHECK_NOTNULL(batchIndex);
dim3 threads(128, 8);
dim3 grid(8, 1);
if (seq2batch) {
KeSequence2Batch<128, 8, 8, 1, 1><<< grid, threads, 0, STREAM_DEFAULT >>>
(batch, sequence, batchIndex, seqWidth, batchCount);
} else {
KeSequence2Batch<128, 8, 8, 0, 1><<< grid, threads, 0, STREAM_DEFAULT >>>
(batch, sequence, batchIndex, seqWidth, batchCount);
}
CHECK_SYNC("hl_sequence2batch_add failed");
}
template<bool normByTimes, bool seq2batch>
__global__
void KeSequence2BatchPadding(real* batch,
real* sequence,
const int* sequenceStartPositions,
const size_t sequenceWidth,
const size_t maxSequenceLength,
const size_t numSequences) {
int batchIdx = blockIdx.y;
int sequenceStart = sequenceStartPositions[batchIdx];
int sequenceLength = sequenceStartPositions[batchIdx + 1] - sequenceStart;
int sequenceIdx = blockIdx.x * blockDim.y + threadIdx.y;
int batchBaseIdx = (sequenceIdx * numSequences + batchIdx) * sequenceWidth;
int sequenceBaseIdx = (sequenceStart + sequenceIdx) * sequenceWidth;
real scale = normByTimes ? (1.0f / (real)sequenceLength) : 1.0f;
if (sequenceIdx < sequenceLength) {
if (seq2batch) {
/* sequence -> batch */
for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
batch[batchBaseIdx + i] = scale * sequence[sequenceBaseIdx + i];
}
} else {
/* batch -> sequence */
for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
sequence[sequenceBaseIdx + i] = scale * batch[batchBaseIdx + i];
}
}
} else if (sequenceIdx < maxSequenceLength) {
if (seq2batch) {
/* sequence -> batch */
for (int i = threadIdx.x; i < sequenceWidth; i += blockDim.x) {
batch[batchBaseIdx + i] = 0;
}
}
}
}
void hl_sequence2batch_copy_padding(real* batch,
real* sequence,
const int* sequenceStartPositions,
const size_t sequenceWidth,
const size_t maxSequenceLength,
const size_t numSequences,
bool normByTimes,
bool seq2batch) {
CHECK_NOTNULL(batch);
CHECK_NOTNULL(sequence);
CHECK_NOTNULL(sequenceStartPositions);
if (!normByTimes && numSequences == 1) {
size_t elementCount = maxSequenceLength * sequenceWidth;
if (seq2batch) {
/* sequence -> batch */
hl_memcpy_device2device(batch, sequence, sizeof(real) * elementCount);
} else {
/* batch -> sequence */
hl_memcpy_device2device(sequence, batch, sizeof(real) * elementCount);
}
return;
}
const int CUDA_BLOCK_SIZE = 512;
/* At least use 32 threads to copy sequenceWidth elements,
and at least 8 elements for each thread. */
int blockDimX = ((((sequenceWidth + 7) >> 3) + 31) >> 5) << 5;
blockDimX = (blockDimX < CUDA_BLOCK_SIZE) ? blockDimX : CUDA_BLOCK_SIZE;
int blockDimY = CUDA_BLOCK_SIZE / blockDimX;
dim3 threads(blockDimX, blockDimY);
int gridDimX = (maxSequenceLength * blockDimX + CUDA_BLOCK_SIZE - 1) /
CUDA_BLOCK_SIZE;
int gridDimY = numSequences;
dim3 grid(gridDimX, gridDimY);
if (seq2batch) {
/* sequence -> batch */
if (normByTimes) {
KeSequence2BatchPadding<1, 1><<< grid, threads, 0, STREAM_DEFAULT >>>(
batch, sequence, sequenceStartPositions,
sequenceWidth, maxSequenceLength, numSequences);
} else {
KeSequence2BatchPadding<0, 1><<< grid, threads, 0, STREAM_DEFAULT >>>(
batch, sequence, sequenceStartPositions,
sequenceWidth, maxSequenceLength, numSequences);
}
} else {
/* batch -> sequence */
if (normByTimes) {
KeSequence2BatchPadding<1, 0><<< grid, threads, 0, STREAM_DEFAULT >>>(
batch, sequence, sequenceStartPositions,
sequenceWidth, maxSequenceLength, numSequences);
} else {
KeSequence2BatchPadding<0, 0><<< grid, threads, 0, STREAM_DEFAULT >>>(
batch, sequence, sequenceStartPositions,
sequenceWidth, maxSequenceLength, numSequences);
}
}
CHECK_SYNC("hl_sequence2batch_copy_padding failed");
}
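/*
 * Worked example of the block-shape arithmetic above (added note): for
 * sequenceWidth = 100, ((100 + 7) >> 3) = 13 threads would give each thread
 * about 8 elements; rounding up to a warp multiple, ((13 + 31) >> 5) << 5 = 32,
 * so blockDimX = 32 and blockDimY = 512 / 32 = 16 sequence rows per block.
 */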
__device__ inline float my_rsqrt(float x) {
return rsqrtf(x);
}
__device__ inline double my_rsqrt(double x) {
return rsqrt(x);
}
__global__ void KeSequenceAvgForward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int row = gid / width;
int col = gid % width;
if (gid < height * width) {
int start = starts[row];
int end = starts[row + 1];
int seqLength = end - start;
if (seqLength == 0) return;
real sum = 0.0;
for (int i = start; i < end; i++) {
sum += src[i * width + col];
}
sum = mode == 1 ? sum :
(mode == 0 ? sum / seqLength : sum * my_rsqrt((real)seqLength));
dst[gid] += sum;
}
}
void hl_sequence_avg_forward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
CHECK_NOTNULL(dst);
CHECK_NOTNULL(src);
CHECK_NOTNULL(starts);
int block = 512;
int grid = DIVUP(width * height, 512);
CHECK(mode == 0 || mode == 1 || mode == 2)
<< "mode error in hl_sequence_avg_forward!";
KeSequenceAvgForward<<< grid, block, 0, STREAM_DEFAULT >>>
(dst, src, starts, height, width, mode);
CHECK_SYNC("hl_sequence_avg_forward failed");
}
__global__ void KeSequenceAvgBackward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int row = gid / width;
int col = gid % width;
if (gid < height * width) {
int start = starts[row];
int end = starts[row + 1];
int seqLength = end - start;
if (seqLength == 0) return;
real grad = src[gid];
grad = mode == 1 ? grad :
(mode == 0 ? grad / seqLength : grad * my_rsqrt((real)seqLength));
for (int i = start; i < end; i++) {
dst[i * width + col] += grad;
}
}
}
void hl_sequence_avg_backward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
CHECK_NOTNULL(dst);
CHECK_NOTNULL(src);
CHECK_NOTNULL(starts);
int block = 512;
int grid = DIVUP(width * height, 512);
CHECK(mode == 0 || mode == 1 || mode == 2)
<< "mode error in hl_sequence_avg_backward!";
KeSequenceAvgBackward<<< grid, block, 0, STREAM_DEFAULT >>>
(dst, src, starts, height, width, mode);
CHECK_SYNC("hl_sequence_avg_backward failed");
}
|
4f65626d2360356ff1ca173494213bb341c97b33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cudpp.h>
#include<limits.h>
#include <sys/time.h>
#define NO_OF_THREADS_PER_BLOCK 1024
unsigned int noOfEdges;
unsigned int noOfVertices;
unsigned int *vertices;
unsigned int *edges;
unsigned int *weights;
unsigned int *d_size;
unsigned int *d_edgeListSize;
unsigned int *d_vertexListSize;
unsigned int *segmentedMinScanInput;
unsigned int *d_segmentedMinScanInput;
unsigned int *d_segmentedMinScanOutput;
unsigned int *d_previousIDs;
unsigned int *d_successorArray;
unsigned int *d_successorArrayTemp;
unsigned int *d_indices;
unsigned int *d_edgeMap;
unsigned int *d_edgeMapCopy;
unsigned int *d_edgesCopy;
unsigned int *d_edgeIndices;
unsigned int *d_superVertexID;
unsigned int *d_superEdgeId;
unsigned int *d_MSTOutput;
unsigned int *h_MSTOutput;
unsigned int *d_edges;
unsigned int *d_vertices;
unsigned int *d_weights;
unsigned int *d_edgeFlagArray;
unsigned int *d_vertexFlagArray;
unsigned int noOfEdgesOriginal;
unsigned int noOfVerticesOriginal;
int *d_pickArray;
CUDPPHandle theCudpp;
CUDPPHandle segmentedScanPlan_min;
CUDPPConfiguration segmented_min_scan_config;
CUDPPHandle scanPlan;
CUDPPConfiguration scan_config;
CUDPPHandle sortPlan;
CUDPPConfiguration config_sort;
/* Pack the edge weight (upper 10 bits) and destination vertex (lower 22 bits) into one integer per edge */
__global__ void mergeEdgeAndWeight(unsigned int *d_segmentedMinScanInput, unsigned int *d_vertices, unsigned int *d_weight, unsigned int *d_edges, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
unsigned int temp = d_weight[index];
d_segmentedMinScanInput[index] = (temp<<22) | d_edges[index];
}
}
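/*
 * Illustration (added note, not from the original source): this packing
 * assumes weights fit in 10 bits and vertex ids in 22 bits. For weight = 5
 * and destination vertex = 3, (5 << 22) | 3 = 20971523; since the weight sits
 * in the high bits, an unsigned min over packed values selects the lightest
 * edge, breaking ties by the smaller destination vertex id.
 */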
/* initialise all entries of the array pointed to by d_Array to 0 */
__global__ void initArray(unsigned int *d_Array, unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size) {
d_Array[index] = 0;
}
}
__global__ void printArr(unsigned int *d_arr, unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if (index < size) {
printf("%d ", d_arr[index]);
}
printf("\n");
}
/* creates a flag array for segmented scan. Sets to 1 the index from where outgoing vertex starts*/
__global__ void markSegment(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int *d_edges, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
d_edgeFlagArray[d_vertex[index]] = 1;
}
}
/*prints new edge and vertex size*/
__global__ void print(unsigned int *d_edgeListSize, unsigned int *d_vertexListSize)
{
printf("Edges: %d, Vertices %d \n", *d_edgeListSize, *d_vertexListSize);
}
/*creates successor array*/
__global__ void createSuccArray(unsigned int *d_successorArray, unsigned int *d_vertices, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int noOfVertices, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
unsigned int minEdgeIndex;
if(index < noOfVertices) {
//index is same as vertex ID
if (index == noOfVertices-1)
minEdgeIndex = noOfEdges - 1;
else
minEdgeIndex = d_vertices[index+1] - 1; // min value is stored in loc of last neighbour
unsigned int val = d_segmentedMinScanOutput[minEdgeIndex];
//unsigned int minWeight = val >> 22;
		unsigned int minVertex = val & ((1u << 22) - 1);
d_successorArray[index] = minVertex;
}
}
/*removes cycles from successor array*/
__global__ void eliminateCycles(unsigned int *d_successor, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int succIndex = d_successor[d_successor[index]];
if(index == succIndex) {
if(index < d_successor[index]) {
d_successor[index] = index;
} else {
d_successor[d_successor[index]]= d_successor[index];
}
}
}
}
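/*
 * Illustration (added note, not from the original source): if vertices 2 and
 * 5 pick each other (successor[2] = 5, successor[5] = 2), each thread sees
 * successor[successor[u]] == u; the branch above roots the pair at the
 * smaller id, so successor[2] becomes 2 while successor[5] keeps pointing
 * at 2.
 */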
/* hybrid implementation of markSegment function */
__global__ void markSegment1(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices && index > 0) {
d_edgeFlagArray[d_vertex[index]] = 1;
}
}
/*This function is to determine which edges are actually needed*/
__global__ void populatePArray(int *d_pickArray, unsigned int *d_vertices, unsigned int *d_successor, unsigned int *d_preIDs, unsigned int noOfVertices, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
if(d_preIDs[index] != d_successor[d_preIDs[index]]) {
if(d_preIDs[index] < (noOfVertices - 1))
d_pickArray[index] = d_vertices[d_preIDs[index]+1] - 1;
else
d_pickArray[index] = noOfEdges - 1;
}
else
d_pickArray[index] = -1;
}
}
/*This function determines which edges will be part of output*/
__global__ void AppendOutputEdges(int *d_pickArray, unsigned int * d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_MSTOutput, unsigned int *d_edgeMap, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges && d_pickArray[index] >= 0) {
unsigned int edgeid = d_edgeMap[index];
unsigned int prev = 0;
int temp = -1;
unsigned int segmentedOutput = d_segmentedMinScanOutput[d_pickArray[index]];
unsigned int currIndex = d_segmentedMinScanOutput[index];
if(index > 0) {
temp = d_pickArray[index-1];
prev = d_segmentedMinScanOutput[index-1];
}
if(d_pickArray[index] != temp) {
if(currIndex == segmentedOutput) {
d_MSTOutput[edgeid]=1;
}
} else {
if(currIndex != prev && currIndex == segmentedOutput) {
d_MSTOutput[edgeid]=1;
}
}
}
}
/*This function sets each value of array equal to its index*/
__global__ void setIndices(unsigned int *d_arr,unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size)
d_arr[index] = index;
}
/* This function copies data from original successorArray so that it can be used for new computation*/
__global__ void makeTempSuccCopy(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int t = d_successorArray[index];
d_successorArrayTemp[index] = t;
}
}
/* This function copies data from temporary successorArray so that it can be updated with correct value */
__global__ void updateSuccArray(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int t = d_successorArrayTemp[index];
d_successorArray[index] = t;
}
}
/* This function uses pointer doubling to assign representative id to each vertex*/
__global__ void propagateRepVertexID(unsigned int *d_successorArray, bool *d_isSuccUpdated, unsigned int *d_previousIDs, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int successor = d_successorArray[index];
		if(successor != d_successorArray[successor]) { // not yet at the representative; jump one level up the chain
*d_isSuccUpdated=true;
d_successorArrayTemp[index] = d_successorArray[successor];
}
}
}
/* This function iteratively sets s(u) = s(s(u)) (pointer jumping) to propagate the representative vertex id */
void propagateID(unsigned int noOfBlocks_vertices, unsigned int noOfThreads_vertices)
{
bool succchange;
bool *d_isSuccUpdated;
hipMalloc(&d_successorArrayTemp, sizeof(int)*noOfVertices);
hipMalloc((void**)&d_isSuccUpdated, sizeof(bool));
do
{
succchange=false;
hipMemcpy(d_isSuccUpdated, &succchange, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( makeTempSuccCopy), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices);
hipLaunchKernelGGL(( propagateRepVertexID), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_isSuccUpdated, d_previousIDs,d_successorArrayTemp, noOfVertices);
hipLaunchKernelGGL(( updateSuccArray), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices);
hipMemcpy(&succchange, d_isSuccUpdated, sizeof(bool), hipMemcpyDeviceToHost);
}while(succchange);
hipFree(d_successorArrayTemp);
hipFree(d_isSuccUpdated);
}
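/*
 * Illustration (added note, not from the original source): for the successor
 * chain 4 -> 3 -> 1 -> 0 (with 0 -> 0 as root), each round replaces s(u) by
 * s(s(u)): round 1 yields 4 -> 1 and 3 -> 0, round 2 yields 4 -> 0, and the
 * next pass makes no update, so d_isSuccUpdated stays false and the loop ends.
 */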
/*This function creates scan flag*/
void __global__ createScanFlag(unsigned int *d_vertexFlagArray, unsigned int *d_successorArray, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices && index > 0)
{
unsigned int prev_val = d_successorArray[index-1];
unsigned int curr_val = d_successorArray[index];
if (prev_val != curr_val) {
d_vertexFlagArray[index] = 1;
}
}
}
/*This function assigns supervertex id to each vertex*/
__global__ void assignSuperVertexID(unsigned int *d_superVertex, unsigned int *d_indices, unsigned int *d_vertexFlagArray,unsigned int *d_previousIDs,unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
d_vertexFlagArray[d_indices[index]] = d_superVertex[index];
}
}
/* This function updates supervertexid */
__global__ void updateSuperVertexID(unsigned int *d_superVertex,unsigned int *d_arr,unsigned int *d_vertexFlagArray, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int newId = d_vertexFlagArray[index];
d_superVertex[index] = newId;
}
}
/* This function removes self edges after successor array is created */
__global__ void removeSelfEdges(unsigned int *d_edges, unsigned int *d_prevIds,unsigned int *d_superVertexID, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
		unsigned int uid = d_superVertexID[d_prevIds[index]]; // supervertex id of the edge's source endpoint
unsigned int vid = d_superVertexID[d_edges[index]];
if(uid == vid) {
d_edges[index]=INT_MAX;
}
}
}
/* This function is to assign new super edge id*/
__global__ void assignSuperEdgeId(unsigned int *d_superEdgeId, unsigned int *d_previousIds, unsigned int *d_superVertexId, unsigned int *d_edge, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges)
{
unsigned int x = d_previousIds[index];
unsigned int id = INT_MAX;
if (x != INT_MAX && d_edge[index] != INT_MAX) {
id = d_superVertexId[x];
}
d_superEdgeId[index] = id;
}
}
/* This function is to compress the edge list*/
__global__ void edgeCompression(unsigned int *d_edges, unsigned int *d_weights, unsigned int *d_vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_superVertexID, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeFlagArray, unsigned int *d_superEdgeId, unsigned int * d_edgeIndices, int *d_pickArray, unsigned int *d_size, unsigned int *d_edgeListSize, unsigned int *d_vertexListSize)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < *d_size) {
unsigned int id = d_edgeIndices[index];
if(d_superEdgeId[index] != INT_MAX && d_edges[id] != INT_MAX) {
if(index == *d_size-1) {
*d_edgeListSize = index + 1;
*d_vertexListSize = d_superEdgeId[index] + 1;
}
d_segmentedMinScanOutput[index] = d_weights[id];
d_segmentedMinScanInput[index] = d_superVertexID[d_edges[id]];
d_pickArray[index] = d_superEdgeId[index];
d_edgeMapCopy[index] = d_edgeMap[id];
}
}
}
/*This function copies the temporary array to arrays which will be actually used*/
__global__ void copyArrays(unsigned int *d_edges, unsigned int *d_weights, unsigned int *vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeCopy, unsigned int *d_size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < *d_size) {
unsigned int p = d_segmentedMinScanInput[index];
d_edges[index] = p;
unsigned int wt = d_segmentedMinScanOutput[index];
d_weights[index] = wt;
unsigned int mapVal = d_edgeMapCopy[index];
d_edgeMap[index] = mapVal;
}
}
/*This function determines the new edge list*/
__global__ void makeEdgeList(unsigned int *d_edgeFlagArray, unsigned int *d_edges, unsigned int *d_superEdgeId, unsigned int *d_size, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index == 0) {
d_edgeFlagArray[index] = 1;
} else if(index < noOfEdges && index > 0) {
if(d_superEdgeId[index-1] != INT_MAX && d_superEdgeId[index] == INT_MAX) {
*d_size = index;
}
if(d_superEdgeId[index] > d_superEdgeId[index-1]) {
d_edgeFlagArray[index] = 1;
}
}
}
/*This function helps in creating new vertices list for next iteration*/
__global__ void CreateVertexListFlag(unsigned int *d_edgeFlagArray, unsigned int *d_vertices, int *d_pickArray, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index == 0) {
d_edgeFlagArray[index] = 1;
} else if(index < noOfEdges && index > 0) {
if(d_pickArray[index] > d_pickArray[index-1]) {
d_edgeFlagArray[index] = 1;
}
}
}
/*This function helps to build new vertex list*/
__global__ void BuildVertexList(unsigned int *d_vertices, unsigned int *d_edges, int *d_pickArray, unsigned int *d_edgeFlagArray, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges && d_edgeFlagArray[index] == 1) {
d_vertices[d_pickArray[index]] = index;
}
}
/* Parse the input file to setup our graph
* we set the relevant arrays here
*/
void parseInputFile(char *fileName)
{
unsigned int x,temp;
unsigned int edgeNo, weightOfEdge;
FILE *fp;
fp = fopen(fileName,"r");
printf("\n Parsing Input File: \n");
fscanf(fp,"%d",&noOfVertices);
vertices = (unsigned int *)malloc(sizeof(unsigned int) * noOfVertices);
int i;
for (i=0; i<noOfVertices; i++) {
fscanf(fp,"%d %d",&x, &temp);
vertices[i] = x;
}
fscanf(fp,"%d",&temp);
fscanf(fp,"%d",&noOfEdges);
edges = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
weights = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
for(i=0; i<noOfEdges; i++) {
fscanf(fp,"%d %d",&edgeNo, &weightOfEdge);
edges[i] = edgeNo;
weights[i] = weightOfEdge;
}
printf("No. of Vertices in Input: %d\n",noOfVertices);
printf("No. of Edges in Input: %d\n", noOfEdges);
fclose(fp);
}
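/*
 * Input layout, as read above (the second field on each vertex line and the
 * lone value before the edge count are parsed but ignored):
 *   <noOfVertices>
 *   <offsetOfFirstOutgoingEdge> <ignored>    one line per vertex
 *   <ignored>
 *   <noOfEdges>
 *   <destinationVertex> <weight>             one line per edge
 */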
/* this is to setup configuration parameters for various primitives*/
void setupPlan()
{
cudppCreate(&theCudpp);
scan_config.algorithm = CUDPP_SCAN;
scan_config.op = CUDPP_ADD;
scan_config.datatype = CUDPP_UINT;
scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
segmented_min_scan_config.algorithm = CUDPP_SEGMENTED_SCAN;
segmented_min_scan_config.op = CUDPP_MIN;
segmented_min_scan_config.datatype = CUDPP_UINT;
segmented_min_scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
config_sort.algorithm = CUDPP_SORT_RADIX;
config_sort.datatype = CUDPP_UINT;
config_sort.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_KEY_VALUE_PAIRS;
}
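/*
 * Summary of the three CUDPP configurations above (added note): scan_config
 * is an inclusive +-scan over unsigned ints (used, e.g., to turn flag arrays
 * into ids); segmented_min_scan_config computes a running unsigned min within
 * each flagged segment (one segment per vertex's adjacency list); config_sort
 * is a key/value radix sort, presumably used to group edges by supervertex id
 * between Boruvka iterations.
 */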
/* Dynamically allocate necessary arrays*/
void mallocArr()
{
hipMalloc(&d_segmentedMinScanInput, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_weights, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_edges, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_vertices, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_edgeFlagArray, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_segmentedMinScanOutput, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_successorArray, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_previousIDs, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_pickArray, sizeof(int )*noOfEdges);
hipMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_MSTOutput, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_indices, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_vertexFlagArray, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices);
hipMalloc(&d_size, sizeof(unsigned int ));
hipMalloc(&d_superEdgeId, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_edgeIndices, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_edgeListSize, sizeof(unsigned int ));
hipMalloc(&d_vertexListSize, sizeof(unsigned int ));
hipMalloc(&d_edgeMapCopy, sizeof(unsigned int )*noOfEdges);
hipMalloc(&d_edgeMap, sizeof(unsigned int )*noOfEdges);
h_MSTOutput = (unsigned int *)malloc(sizeof(unsigned int )*noOfEdges);
}
/*Free the dynamically allocated memory. Do other cleanup here*/
void cleanUp()
{
hipFree(d_edgeIndices);
hipFree(d_superEdgeId);
hipFree(d_edgeMap);
hipFree(d_edgeMapCopy);
hipFree(d_superVertexID);
hipFree(d_vertexFlagArray);
hipFree(d_indices);
hipFree(d_MSTOutput);
hipFree(d_previousIDs);
hipFree(d_pickArray);
hipFree(d_successorArray);
hipFree(d_segmentedMinScanOutput);
hipFree(d_edgeFlagArray);
hipFree(d_vertices);
hipFree(d_edges);
hipFree(d_weights);
hipFree(d_segmentedMinScanInput);
hipFree(d_size);
hipFree(d_edgeListSize);
hipFree(d_vertexListSize);
cudppDestroy(theCudpp);
free(h_MSTOutput);
free(edges);
free(vertices);
free(weights);
}
/* Do basic initialization*/
void initialize()
{
unsigned int i;
hipMemcpy(d_vertices, vertices, sizeof(unsigned int)*noOfVertices, hipMemcpyHostToDevice);
hipMemcpy(d_edges, edges, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
hipMemcpy(d_weights, weights, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
unsigned int *temp = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
for(i=0; i<noOfEdges; i++)
temp[i] = 0;
hipMemcpy(d_MSTOutput, temp, sizeof(unsigned int )*noOfEdges, hipMemcpyHostToDevice);
for(i=0; i<noOfEdges; i++)
temp[i]=i;
hipMemcpy(d_edgeMap, temp, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
free(temp);
}
/* Helper function to determine no of threads to be used */
unsigned int getNoOfThreads(unsigned int size) {
unsigned int threadsPerBlock;
if (size <= 1024)
threadsPerBlock = size;
else
threadsPerBlock = 1024;
return threadsPerBlock;
}
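/* One Boruvka iteration: (1) find each vertex's minimum-weight outgoing edge
with a segmented min scan, (2) build and flatten the successor array into
supervertices, (3) append the picked edges to the MST output, and
(4) relabel and compact the edge and vertex lists for the next round. */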
void boruvka()
{
unsigned int noOfThreads_edge = getNoOfThreads(noOfEdges);
unsigned int noOfBlocks_edge = (noOfEdges+1024)/noOfThreads_edge;
unsigned int noOfThreads_vertices = getNoOfThreads(noOfVertices);
unsigned int noOfBlocks_vertices = (noOfVertices+1024)/noOfThreads_vertices;
hipError_t error;
hipLaunchKernelGGL(( mergeEdgeAndWeight), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanInput, d_vertices, d_weights, d_edges, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("0.1 CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 577 CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
hipLaunchKernelGGL(( markSegment), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_edgeFlagArray, d_vertices, d_edges, noOfVertices);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("3 CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
cudppPlan(theCudpp, &segmentedScanPlan_min,segmented_min_scan_config, noOfEdges, 1, 0 ); //Make the segmented min scan plan
cudppSegmentedScan(segmentedScanPlan_min, d_segmentedMinScanOutput, d_segmentedMinScanInput, (const unsigned int *)d_edgeFlagArray, noOfEdges);
cudppDestroyPlan(segmentedScanPlan_min);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("CUDA error: %s\n", hipGetErrorString(error));
// exit(-1);
}
hipLaunchKernelGGL(( createSuccArray), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, noOfVertices, noOfEdges);
hipLaunchKernelGGL(( eliminateCycles), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_successorArray, noOfVertices);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges);
hipLaunchKernelGGL(( markSegment1), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_edgeFlagArray, d_vertices, noOfVertices);
cudppPlan(theCudpp, &scanPlan, scan_config, noOfEdges, 1, 0);
cudppScan(scanPlan, d_previousIDs, d_edgeFlagArray, noOfEdges);
cudppDestroyPlan(scanPlan);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 614 CUDA error: %s\n", hipGetErrorString(error));
}
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges);
hipLaunchKernelGGL(( populatePArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_pickArray, d_vertices, d_successorArray, d_previousIDs, noOfVertices, noOfEdges);
hipDeviceSynchronize();
hipLaunchKernelGGL(( AppendOutputEdges), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_pickArray, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_MSTOutput, d_edgeMap, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 626 CUDA error: %s\n", hipGetErrorString(error));
}
propagateID(noOfBlocks_vertices, noOfThreads_vertices);
hipLaunchKernelGGL(( setIndices), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_indices,noOfVertices);
cudppPlan(theCudpp, &sortPlan, config_sort, noOfVertices, 1, 0);
cudppRadixSort(sortPlan, d_successorArray, d_indices, noOfVertices);
cudppDestroyPlan(sortPlan);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertexFlagArray,noOfVertices);
hipLaunchKernelGGL(( createScanFlag), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertexFlagArray, d_successorArray,noOfVertices);
cudppPlan(theCudpp, &scanPlan, scan_config, noOfVertices, 1, 0);
cudppScan(scanPlan, d_superVertexID, d_vertexFlagArray, noOfVertices);
cudppDestroyPlan(scanPlan);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 645 CUDA error: %s\n", hipGetErrorString(error));
}
hipLaunchKernelGGL(( assignSuperVertexID), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_superVertexID,d_indices,d_vertexFlagArray,d_previousIDs,noOfVertices);
hipLaunchKernelGGL(( updateSuperVertexID), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_superVertexID,d_indices,d_vertexFlagArray, noOfVertices);
hipLaunchKernelGGL(( removeSelfEdges), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edges,d_previousIDs,d_superVertexID,noOfEdges);
hipLaunchKernelGGL(( assignSuperEdgeId), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_superEdgeId,d_previousIDs, d_superVertexID, d_edges, noOfEdges);
hipLaunchKernelGGL(( setIndices), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeIndices,noOfEdges);
cudppPlan(theCudpp, &sortPlan, config_sort, noOfEdges, 1, 0);
cudppRadixSort(sortPlan, d_superEdgeId, d_edgeIndices, noOfEdges);
cudppDestroyPlan(sortPlan);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges);
unsigned int h_size = noOfEdges + 1;
hipMemcpy(d_size,&h_size,sizeof(unsigned int ), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( makeEdgeList), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, d_edges, d_superEdgeId, d_size, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 668 CUDA error: %s\n", hipGetErrorString(error));
}
unsigned int zero = 0;
hipMemcpy(d_edgeListSize, &zero, sizeof(unsigned int ), hipMemcpyHostToDevice);
hipMemcpy(d_vertexListSize, &zero, sizeof(unsigned int ), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanInput, noOfEdges);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanOutput, noOfEdges);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeMapCopy, noOfEdges);
hipMemcpy(&h_size,d_size,sizeof(unsigned int ), hipMemcpyDeviceToHost);
unsigned int noOfThreads_new = getNoOfThreads(h_size);
unsigned int noOfBlocks_new = (h_size+1024)/noOfThreads_new;
hipLaunchKernelGGL(( edgeCompression), dim3(noOfBlocks_new), dim3(noOfThreads_new), 0, 0, d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_superVertexID, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_superEdgeId, d_edgeIndices, d_pickArray, d_size, d_edgeListSize, d_vertexListSize);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("At line 688 CUDA error: %s\n", hipGetErrorString(error));
}
hipLaunchKernelGGL(( copyArrays), dim3(noOfBlocks_new), dim3(noOfThreads_new), 0, 0, d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_size);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges);
hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertices, noOfVertices);
hipLaunchKernelGGL(( CreateVertexListFlag), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, d_vertices, d_pickArray, noOfEdges);
hipLaunchKernelGGL(( BuildVertexList), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_vertices, d_edges, d_pickArray, d_edgeFlagArray, noOfEdges);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("after build vertex listlast CUDA error: %s\n", hipGetErrorString(error));
}
hipMemcpy(&noOfEdges, d_edgeListSize, sizeof(unsigned int ), hipMemcpyDeviceToHost);
hipMemcpy(&noOfVertices, d_vertexListSize, sizeof(unsigned int ), hipMemcpyDeviceToHost);
printf("for next round, no of edges = %d and no of vertices = %d\n",noOfEdges, noOfVertices);
error = hipGetLastError();
if(error != hipSuccess)
{
printf("last CUDA error: %s\n", hipGetErrorString(error));
}
}
int main (int argc, char** argv)
{
unsigned int noOfMSTEdges = 0;
unsigned long long int finalMSTWeight = 0;
unsigned int i;
if(argc < 2) {
printf("Usage: %s <input file>\n", argv[0]);
return 1;
}
parseInputFile(argv[1]);
noOfVerticesOriginal = noOfVertices;
noOfEdgesOriginal = noOfEdges;
mallocArr();
initialize();
setupPlan();
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
do {
boruvka();
}while(noOfVertices > 1);
hipDeviceSynchronize();
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
hipMemcpy(h_MSTOutput, d_MSTOutput, sizeof(unsigned int )*noOfEdgesOriginal, hipMemcpyDeviceToHost);
for(i=0; i<noOfEdgesOriginal; i++) {
if(h_MSTOutput[i] == 1) {
finalMSTWeight += weights[i];
noOfMSTEdges++;
}
}
printf("\nNo. of edges in MST [must be equal to (%d-1)]: %d\n", noOfVerticesOriginal, noOfMSTEdges);
printf("Final Weight of resultant MST: %llu\n", finalMSTWeight);
cleanUp();
return 0;
}
| 4f65626d2360356ff1ca173494213bb341c97b33.cu | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cudpp.h>
#include<limits.h>
#include <sys/time.h>
#define NO_OF_THREADS_PER_BLOCK 1024
unsigned int noOfEdges;
unsigned int noOfVertices;
unsigned int *vertices;
unsigned int *edges;
unsigned int *weights;
unsigned int *d_size;
unsigned int *d_edgeListSize;
unsigned int *d_vertexListSize;
unsigned int *segmentedMinScanInput;
unsigned int *d_segmentedMinScanInput;
unsigned int *d_segmentedMinScanOutput;
unsigned int *d_previousIDs;
unsigned int *d_successorArray;
unsigned int *d_successorArrayTemp;
unsigned int *d_indices;
unsigned int *d_edgeMap;
unsigned int *d_edgeMapCopy;
unsigned int *d_edgesCopy;
unsigned int *d_edgeIndices;
unsigned int *d_superVertexID;
unsigned int *d_superEdgeId;
unsigned int *d_MSTOutput;
unsigned int *h_MSTOutput;
unsigned int *d_edges;
unsigned int *d_vertices;
unsigned int *d_weights;
unsigned int *d_edgeFlagArray;
unsigned int *d_vertexFlagArray;
unsigned int noOfEdgesOriginal;
unsigned int noOfVerticesOriginal;
int *d_pickArray;
CUDPPHandle theCudpp;
CUDPPHandle segmentedScanPlan_min;
CUDPPConfiguration segmented_min_scan_config;
CUDPPHandle scanPlan;
CUDPPConfiguration scan_config;
CUDPPHandle sortPlan;
CUDPPConfiguration config_sort;
/* Append vertexid and edge into a single integer of an array*/
__global__ void mergeEdgeAndWeight(unsigned int *d_segmentedMinScanInput, unsigned int *d_vertices, unsigned int *d_weight, unsigned int *d_edges, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
unsigned int temp = d_weight[index];
d_segmentedMinScanInput[index] = (temp<<22) | d_edges[index];
}
}
/* initialise all entries of array pointed by d_array of given size to 0*/
__global__ void initArray(unsigned int *d_Array, unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size) {
d_Array[index] = 0;
}
}
__global__ void printArr(unsigned int *d_arr, unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if (index < size) {
printf("%d ", d_arr[index]);
}
printf("\n");
}
/* creates a flag array for segmented scan. Sets to 1 the index from where outgoing vertex starts*/
__global__ void markSegment(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int *d_edges, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
d_edgeFlagArray[d_vertex[index]] = 1;
}
}
/*prints new edge and vertex size*/
__global__ void print(unsigned int *d_edgeListSize, unsigned int *d_vertexListSize)
{
printf("Edges: %d, Vertices %d \n", *d_edgeListSize, *d_vertexListSize);
}
/*creates successor array*/
__global__ void createSuccArray(unsigned int *d_successorArray, unsigned int *d_vertices, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int noOfVertices, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
unsigned int minEdgeIndex;
if(index < noOfVertices) {
//index is same as vertex ID
if (index == noOfVertices-1)
minEdgeIndex = noOfEdges - 1;
else
minEdgeIndex = d_vertices[index+1] - 1; // min value is stored in loc of last neighbour
unsigned int val = d_segmentedMinScanOutput[minEdgeIndex];
//unsigned int minWeight = val >> 22;
unsigned int minVertex = val & (unsigned int)(pow(2.0,22)-1);
d_successorArray[index] = minVertex;
}
}
/*removes cycles from successor array*/
__global__ void eliminateCycles(unsigned int *d_successor, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int succIndex = d_successor[d_successor[index]];
if(index == succIndex) {
if(index < d_successor[index]) {
d_successor[index] = index;
} else {
d_successor[d_successor[index]]= d_successor[index];
}
}
}
}
/* hybrid implementation of markSegment function */
__global__ void markSegment1(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices && index > 0) {
d_edgeFlagArray[d_vertex[index]] = 1;
}
}
/*This function is to determine which edges are actually needed*/
__global__ void populatePArray(int *d_pickArray, unsigned int *d_vertices, unsigned int *d_successor, unsigned int *d_preIDs, unsigned int noOfVertices, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
if(d_preIDs[index] != d_successor[d_preIDs[index]]) {
if(d_preIDs[index] < (noOfVertices - 1))
d_pickArray[index] = d_vertices[d_preIDs[index]+1] - 1;
else
d_pickArray[index] = noOfEdges - 1;
}
else
d_pickArray[index] = -1;
}
}
/*This function determines which edges will be part of output*/
__global__ void AppendOutputEdges(int *d_pickArray, unsigned int * d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_MSTOutput, unsigned int *d_edgeMap, unsigned int noOfEdges)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges && d_pickArray[index] >= 0) {
unsigned int edgeid = d_edgeMap[index];
unsigned int prev = 0;
int temp = -1;
unsigned int segmentedOutput = d_segmentedMinScanOutput[d_pickArray[index]];
unsigned int currIndex = d_segmentedMinScanOutput[index];
if(index > 0) {
temp = d_pickArray[index-1];
prev = d_segmentedMinScanOutput[index-1];
}
if(d_pickArray[index] != temp) {
if(currIndex == segmentedOutput) {
d_MSTOutput[edgeid]=1;
}
} else {
if(currIndex != prev && currIndex == segmentedOutput) {
d_MSTOutput[edgeid]=1;
}
}
}
}
/*This function sets each value of array equal to its index*/
__global__ void setIndices(unsigned int *d_arr,unsigned int size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < size)
d_arr[index] = index;
}
/* This function copies data from original successorArray so that it can be used for new computation*/
__global__ void makeTempSuccCopy(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int t = d_successorArray[index];
d_successorArrayTemp[index] = t;
}
}
/* This function copies data from temporary successorArray so that it can be updated with correct value */
__global__ void updateSuccArray(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int t = d_successorArrayTemp[index];
d_successorArray[index] = t;
}
}
/* This function uses pointer doubling to assign representative id to each vertex*/
__global__ void propagateRepVertexID(unsigned int *d_successorArray, bool *d_isSuccUpdated, unsigned int *d_previousIDs, unsigned int *d_successorArrayTemp, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int successor = d_successorArray[index];
if(successor != d_successorArray[successor]) { // successor is not yet a root: jump one more level (pointer doubling)
*d_isSuccUpdated=true;
d_successorArrayTemp[index] = d_successorArray[successor];
}
}
}
/* This function iteratively sets s(s(u)) = u and propogates representative vertex id*/
void propagateID(unsigned int noOfBlocks_vertices, unsigned int noOfThreads_vertices)
{
bool succchange;
bool *d_isSuccUpdated;
cudaMalloc(&d_successorArrayTemp, sizeof(int)*noOfVertices);
cudaMalloc((void**)&d_isSuccUpdated, sizeof(bool));
do
{
succchange=false;
cudaMemcpy(d_isSuccUpdated, &succchange, sizeof(bool), cudaMemcpyHostToDevice);
makeTempSuccCopy<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices);
propagateRepVertexID<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_isSuccUpdated, d_previousIDs,d_successorArrayTemp, noOfVertices);
updateSuccArray<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices);
cudaMemcpy(&succchange, d_isSuccUpdated, sizeof(bool), cudaMemcpyDeviceToHost);
}while(succchange);
cudaFree(d_successorArrayTemp);
cudaFree(d_isSuccUpdated);
}
/*This function creates scan flag*/
void __global__ createScanFlag(unsigned int *d_vertexFlagArray, unsigned int *d_successorArray, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices && index > 0)
{
unsigned int prev_val = d_successorArray[index-1];
unsigned int curr_val = d_successorArray[index];
if (prev_val != curr_val) {
d_vertexFlagArray[index] = 1;
}
}
}
/*This function assigns supervertex id to each vertex*/
__global__ void assignSuperVertexID(unsigned int *d_superVertex, unsigned int *d_indices, unsigned int *d_vertexFlagArray,unsigned int *d_previousIDs,unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
d_vertexFlagArray[d_indices[index]] = d_superVertex[index];
}
}
/* This function updates supervertexid */
__global__ void updateSuperVertexID(unsigned int *d_superVertex,unsigned int *d_arr,unsigned int *d_vertexFlagArray, unsigned int noOfVertices)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfVertices) {
unsigned int newId = d_vertexFlagArray[index];
d_superVertex[index] = newId;
}
}
/* This function removes self edges after successor array is created */
__global__ void removeSelfEdges(unsigned int *d_edges, unsigned int *d_prevIds,unsigned int *d_superVertexID, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges) {
unsigned int uid = d_superVertexID[d_prevIds[index]]; //because d_prevIds[index] is 1 to 6 but we need 0 to 5
unsigned int vid = d_superVertexID[d_edges[index]];
if(uid == vid) {
d_edges[index]=INT_MAX;
}
}
}
/* This function is to assign new super edge id*/
__global__ void assignSuperEdgeId(unsigned int *d_superEdgeId, unsigned int *d_previousIds, unsigned int *d_superVertexId, unsigned int *d_edge, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges)
{
unsigned int x = d_previousIds[index];
unsigned int id = INT_MAX;
if (x != INT_MAX && d_edge[index] != INT_MAX) {
id = d_superVertexId[x];
}
d_superEdgeId[index] = id;
}
}
/* This function is to compress the edge list*/
__global__ void edgeCompression(unsigned int *d_edges, unsigned int *d_weights, unsigned int *d_vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_superVertexID, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeFlagArray, unsigned int *d_superEdgeId, unsigned int * d_edgeIndices, int *d_pickArray, unsigned int *d_size, unsigned int *d_edgeListSize, unsigned int *d_vertexListSize)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < *d_size) {
unsigned int id = d_edgeIndices[index];
if(d_superEdgeId[index] != INT_MAX && d_edges[id] != INT_MAX) {
if(index == *d_size-1) {
*d_edgeListSize = index + 1;
*d_vertexListSize = d_superEdgeId[index] + 1;
}
d_segmentedMinScanOutput[index] = d_weights[id];
d_segmentedMinScanInput[index] = d_superVertexID[d_edges[id]];
d_pickArray[index] = d_superEdgeId[index];
d_edgeMapCopy[index] = d_edgeMap[id];
}
}
}
/*This function copies the temporary array to arrays which will be actually used*/
__global__ void copyArrays(unsigned int *d_edges, unsigned int *d_weights, unsigned int *vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeCopy, unsigned int *d_size)
{
unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < *d_size) {
unsigned int p = d_segmentedMinScanInput[index];
d_edges[index] = p;
unsigned int wt = d_segmentedMinScanOutput[index];
d_weights[index] = wt;
unsigned int mapVal = d_edgeMapCopy[index];
d_edgeMap[index] = mapVal;
}
}
/*This function determines the new edge list*/
__global__ void makeEdgeList(unsigned int *d_edgeFlagArray, unsigned int *d_edges, unsigned int *d_superEdgeId, unsigned int *d_size, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index == 0) {
d_edgeFlagArray[index] = 1;
} else if(index < noOfEdges && index > 0) {
if(d_superEdgeId[index-1] != INT_MAX && d_superEdgeId[index] == INT_MAX) {
*d_size = index;
}
if(d_superEdgeId[index] > d_superEdgeId[index-1]) {
d_edgeFlagArray[index] = 1;
}
}
}
/*This function helps in creating new vertices list for next iteration*/
__global__ void CreateVertexListFlag(unsigned int *d_edgeFlagArray, unsigned int *d_vertices, int *d_pickArray, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index == 0) {
d_edgeFlagArray[index] = 1;
} else if(index < noOfEdges && index > 0) {
if(d_pickArray[index] > d_pickArray[index-1]) {
d_edgeFlagArray[index] = 1;
}
}
}
/*This function helps to build new vertex list*/
__global__ void BuildVertexList(unsigned int *d_vertices, unsigned int *d_edges, int *d_pickArray, unsigned int *d_edgeFlagArray, unsigned int noOfEdges)
{
unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
if(index < noOfEdges && d_edgeFlagArray[index] == 1) {
d_vertices[d_pickArray[index]] = index;
}
}
/* Parse the input file to setup our graph
* we set the relevant arrays here
*/
void parseInputFile(char *fileName)
{
unsigned int x,temp;
unsigned int edgeNo, weightOfEdge;
FILE *fp;
fp = fopen(fileName,"r");
printf("\n Parsing Input File: \n");
fscanf(fp,"%d",&noOfVertices);
vertices = (unsigned int *)malloc(sizeof(unsigned int) * noOfVertices);
int i;
for (i=0; i<noOfVertices; i++) {
fscanf(fp,"%d %d",&x, &temp);
vertices[i] = x;
}
fscanf(fp,"%d",&temp);
fscanf(fp,"%d",&noOfEdges);
edges = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
weights = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
for(i=0; i<noOfEdges; i++) {
fscanf(fp,"%d %d",&edgeNo, &weightOfEdge);
edges[i] = edgeNo;
weights[i] = weightOfEdge;
}
printf("No. of Vertices in Input: %d\n",noOfVertices);
printf("No. of Edges in Input: %d\n", noOfEdges);
fclose(fp);
}
/* this is to setup configuration parameters for various primitives*/
void setupPlan()
{
cudppCreate(&theCudpp);
scan_config.algorithm = CUDPP_SCAN;
scan_config.op = CUDPP_ADD;
scan_config.datatype = CUDPP_UINT;
scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
segmented_min_scan_config.algorithm = CUDPP_SEGMENTED_SCAN;
segmented_min_scan_config.op = CUDPP_MIN;
segmented_min_scan_config.datatype = CUDPP_UINT;
segmented_min_scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE;
config_sort.algorithm = CUDPP_SORT_RADIX;
config_sort.datatype = CUDPP_UINT;
config_sort.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_KEY_VALUE_PAIRS;
}
/* Dynamically allocate necessary arrays*/
void mallocArr()
{
cudaMalloc(&d_segmentedMinScanInput, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_weights, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_edges, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_vertices, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_edgeFlagArray, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_segmentedMinScanOutput, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_successorArray, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_previousIDs, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_pickArray, sizeof(int )*noOfEdges);
cudaMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_MSTOutput, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_indices, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_vertexFlagArray, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices);
cudaMalloc(&d_size, sizeof(unsigned int ));
cudaMalloc(&d_superEdgeId, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_edgeIndices, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_edgeListSize, sizeof(unsigned int ));
cudaMalloc(&d_vertexListSize, sizeof(unsigned int ));
cudaMalloc(&d_edgeMapCopy, sizeof(unsigned int )*noOfEdges);
cudaMalloc(&d_edgeMap, sizeof(unsigned int )*noOfEdges);
h_MSTOutput = (unsigned int *)malloc(sizeof(unsigned int )*noOfEdges);
}
/*Free the dynamically allocated memory. Do other cleanup here*/
void cleanUp()
{
cudaFree(d_edgeIndices);
cudaFree(d_superEdgeId);
cudaFree(d_edgeMap);
cudaFree(d_edgeMapCopy);
cudaFree(d_superVertexID);
cudaFree(d_vertexFlagArray);
cudaFree(d_indices);
cudaFree(d_MSTOutput);
cudaFree(d_previousIDs);
cudaFree(d_pickArray);
cudaFree(d_successorArray);
cudaFree(d_segmentedMinScanOutput);
cudaFree(d_edgeFlagArray);
cudaFree(d_vertices);
cudaFree(d_edges);
cudaFree(d_weights);
cudaFree(d_segmentedMinScanInput);
cudaFree(d_size);
cudaFree(d_edgeListSize);
cudaFree(d_vertexListSize);
cudppDestroy(theCudpp);
free(h_MSTOutput);
free(edges);
free(vertices);
free(weights);
}
/* Do basic initialization*/
void initialize()
{
unsigned int i;
cudaMemcpy(d_vertices, vertices, sizeof(unsigned int)*noOfVertices, cudaMemcpyHostToDevice);
cudaMemcpy(d_edges, edges, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice);
cudaMemcpy(d_weights, weights, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice);
unsigned int *temp = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
for(i=0; i<noOfEdges; i++)
temp[i] = 0;
cudaMemcpy(d_MSTOutput, temp, sizeof(unsigned int )*noOfEdges, cudaMemcpyHostToDevice);
for(i=0; i<noOfEdges; i++)
temp[i]=i;
cudaMemcpy(d_edgeMap, temp, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice);
free(temp);
}
/* Helper function to determine no of threads to be used */
unsigned int getNoOfThreads(unsigned int size) {
unsigned int threadsPerBlock;
if (size <= 1024)
threadsPerBlock = size;
else
threadsPerBlock = 1024;
return threadsPerBlock;
}
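/* One Boruvka iteration: (1) find each vertex's minimum-weight outgoing edge
with a segmented min scan, (2) build and flatten the successor array into
supervertices, (3) append the picked edges to the MST output, and
(4) relabel and compact the edge and vertex lists for the next round. */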
void boruvka()
{
unsigned int noOfThreads_edge = getNoOfThreads(noOfEdges);
unsigned int noOfBlocks_edge = (noOfEdges+1024)/noOfThreads_edge;
unsigned int noOfThreads_vertices = getNoOfThreads(noOfVertices);
unsigned int noOfBlocks_vertices = (noOfVertices+1024)/noOfThreads_vertices;
cudaError_t error;
mergeEdgeAndWeight<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanInput, d_vertices, d_weights, d_edges, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("0.1 CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 577 CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
markSegment<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_edgeFlagArray, d_vertices, d_edges, noOfVertices);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("3 CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
cudppPlan(theCudpp, &segmentedScanPlan_min,segmented_min_scan_config, noOfEdges, 1, 0 ); //Make the segmented min scan plan
cudppSegmentedScan(segmentedScanPlan_min, d_segmentedMinScanOutput, d_segmentedMinScanInput, (const unsigned int *)d_edgeFlagArray, noOfEdges);
cudppDestroyPlan(segmentedScanPlan_min);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("CUDA error: %s\n", cudaGetErrorString(error));
// exit(-1);
}
createSuccArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_successorArray, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, noOfVertices, noOfEdges);
eliminateCycles<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_successorArray, noOfVertices);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges);
markSegment1<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_edgeFlagArray, d_vertices, noOfVertices);
cudppPlan(theCudpp, &scanPlan, scan_config, noOfEdges, 1, 0);
cudppScan(scanPlan, d_previousIDs, d_edgeFlagArray, noOfEdges);
cudppDestroyPlan(scanPlan);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 614 CUDA error: %s\n", cudaGetErrorString(error));
}
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges);
populatePArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_pickArray, d_vertices, d_successorArray, d_previousIDs, noOfVertices, noOfEdges);
cudaThreadSynchronize();
AppendOutputEdges<<<noOfBlocks_edge, noOfThreads_edge>>>(d_pickArray, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_MSTOutput, d_edgeMap, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 626 CUDA error: %s\n", cudaGetErrorString(error));
}
propagateID(noOfBlocks_vertices, noOfThreads_vertices);
setIndices<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_indices,noOfVertices);
cudppPlan(theCudpp, &sortPlan, config_sort, noOfVertices, 1, 0);
cudppRadixSort(sortPlan, d_successorArray, d_indices, noOfVertices);
cudppDestroyPlan(sortPlan);
initArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertexFlagArray,noOfVertices);
createScanFlag<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertexFlagArray, d_successorArray,noOfVertices);
cudppPlan(theCudpp, &scanPlan, scan_config, noOfVertices, 1, 0);
cudppScan(scanPlan, d_superVertexID, d_vertexFlagArray, noOfVertices);
cudppDestroyPlan(scanPlan);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 645 CUDA error: %s\n", cudaGetErrorString(error));
}
assignSuperVertexID<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_superVertexID,d_indices,d_vertexFlagArray,d_previousIDs,noOfVertices);
updateSuperVertexID<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_superVertexID,d_indices,d_vertexFlagArray, noOfVertices);
removeSelfEdges<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edges,d_previousIDs,d_superVertexID,noOfEdges);
assignSuperEdgeId<<<noOfBlocks_edge, noOfThreads_edge>>>(d_superEdgeId,d_previousIDs, d_superVertexID, d_edges, noOfEdges);
setIndices<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeIndices,noOfEdges);
cudppPlan(theCudpp, &sortPlan, config_sort, noOfEdges, 1, 0);
cudppRadixSort(sortPlan, d_superEdgeId, d_edgeIndices, noOfEdges);
cudppDestroyPlan(sortPlan);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges);
unsigned int h_size = noOfEdges + 1;
cudaMemcpy(d_size,&h_size,sizeof(unsigned int ), cudaMemcpyHostToDevice);
makeEdgeList<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, d_edges, d_superEdgeId, d_size, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 668 CUDA error: %s\n", cudaGetErrorString(error));
}
unsigned int zero = 0;
cudaMemcpy(d_edgeListSize, &zero, sizeof(unsigned int ), cudaMemcpyHostToDevice);
cudaMemcpy(d_vertexListSize, &zero, sizeof(unsigned int ), cudaMemcpyHostToDevice);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanInput, noOfEdges);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanOutput, noOfEdges);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeMapCopy, noOfEdges);
cudaMemcpy(&h_size,d_size,sizeof(unsigned int ), cudaMemcpyDeviceToHost);
unsigned int noOfThreads_new = getNoOfThreads(h_size);
unsigned int noOfBlocks_new = (h_size+1024)/noOfThreads_new;
edgeCompression<<<noOfBlocks_new, noOfThreads_new>>>(d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_superVertexID, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_superEdgeId, d_edgeIndices, d_pickArray, d_size, d_edgeListSize, d_vertexListSize);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("At line 688 CUDA error: %s\n", cudaGetErrorString(error));
}
copyArrays<<<noOfBlocks_new, noOfThreads_new>>>(d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_size);
initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges);
initArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertices, noOfVertices);
CreateVertexListFlag<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, d_vertices, d_pickArray, noOfEdges);
BuildVertexList<<<noOfBlocks_edge, noOfThreads_edge>>>(d_vertices, d_edges, d_pickArray, d_edgeFlagArray, noOfEdges);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("after build vertex listlast CUDA error: %s\n", cudaGetErrorString(error));
}
cudaMemcpy(&noOfEdges, d_edgeListSize, sizeof(unsigned int ), cudaMemcpyDeviceToHost);
cudaMemcpy(&noOfVertices, d_vertexListSize, sizeof(unsigned int ), cudaMemcpyDeviceToHost);
printf("for next round, no of edges = %d and no of vertices = %d\n",noOfEdges, noOfVertices);
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("last CUDA error: %s\n", cudaGetErrorString(error));
}
}
int main (int argc, char** argv)
{
unsigned int noOfMSTEdges = 0;
unsigned long long int finalMSTWeight = 0;
unsigned int i;
if(argc < 2) {
printf("Usage: %s <input file>\n", argv[0]);
return 1;
}
parseInputFile(argv[1]);
noOfVerticesOriginal = noOfVertices;
noOfEdgesOriginal = noOfEdges;
mallocArr();
initialize();
setupPlan();
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
do {
boruvka();
}while(noOfVertices > 1);
cudaThreadSynchronize();
gettimeofday(&tv2, NULL);
printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
cudaMemcpy(h_MSTOutput, d_MSTOutput, sizeof(unsigned int )*noOfEdgesOriginal, cudaMemcpyDeviceToHost);
for(i=0; i<noOfEdgesOriginal; i++) {
if(h_MSTOutput[i] == 1) {
finalMSTWeight += weights[i];
noOfMSTEdges++;
}
}
printf("\nNo. of edges in MST [must be equal to (%d-1)]: %d\n", noOfVerticesOriginal, noOfMSTEdges);
printf("Final Weight of resultant MST: %llu\n", finalMSTWeight);
cleanUp();
return 0;
}
|
81095133018984d419a80e02c3b0b77cc46c2e5e.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
#define blockSize 128
__global__ void kernUpSweep(int N, int stride, int halfStride, int * data)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N)
return;
index = (index + 1) * stride - 1;
data[index] += data[index - halfStride];
}
__global__ void kernDownSweepFirst(int N, int stride, int halfStride, int * data)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N)
return;
index = (index + 1) * stride - 1;
int tmp = data[index - halfStride];
// fused first level: the root is implicitly zeroed, so the left child takes 0
data[index - halfStride] = 0;
// and the root takes the left child's old partial sum (0 + tmp)
data[index] = tmp;
}
__global__ void kernDownSweep(int N, int stride, int halfStride, int * data)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N)
return;
index = (index + 1) * stride - 1;
int value = data[index];
int tmp = data[index - halfStride];
// swap: the left child takes the value passed down from the parent
data[index - halfStride] = value;
// the parent position takes its value plus the left child's old partial sum
data[index] = value + tmp;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
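/* Work-efficient (Blelloch) scan: the up-sweep builds partial sums in a
balanced tree, then the down-sweep (whose first level zeroes the root)
converts them into an exclusive prefix sum. n must be a power of two. */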
void scan(int n, int *dev_data)
{
int passes = ilog2ceil(n);
for (int d = 0; d < passes; ++d)
{
int stride = pow(2, d + 1);
int halfStride = stride / 2;
int sliceElements = n / stride;
//printf("%d, %d, %d \n", sliceElements, stride, halfStride);
dim3 blocks((sliceElements + blockSize - 1) / blockSize);
kernUpSweep << <blocks, blockSize >> > (sliceElements, stride, halfStride, dev_data);
checkCUDAErrorFn("kernUpSweep failed!");
}
for (int d = passes - 1; d >= 0; --d)
{
int stride = pow(2, d + 1);
int halfStride = stride / 2;
int sliceElements = n / stride;
//printf("%d, %d, %d \n", sliceElements, stride, halfStride);
dim3 blocks((sliceElements + blockSize - 1) / blockSize);
if (d == passes - 1)
{
kernDownSweepFirst << <blocks, blockSize >> > (sliceElements, stride, halfStride, dev_data);
checkCUDAErrorFn("kernDownSweepFirst failed!");
}
else
{
kernDownSweep << <blocks, blockSize >> > (sliceElements, stride, halfStride, dev_data);
checkCUDAErrorFn("kernDownSweep failed!");
}
}
}
void scan(int n, int *odata, const int *idata)
{
int * dev_data;
int passes = ilog2ceil(n);
int squareN = pow(2, passes);
//printf("%d vs %d\n", n, squareN);
hipMalloc((void**)&dev_data, squareN * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_data failed!");
// zero-initialize the padded buffer (calloc-style)
hipMemset(dev_data, 0, squareN * sizeof(int));
hipMemcpy(dev_data, idata, sizeof(int) * n, hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy dev_data failed!");
timer().startGpuTimer();
scan(squareN, dev_data);
timer().endGpuTimer();
hipMemcpy(odata, dev_data, sizeof(int) * n, hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy dev_data failed!");
hipFree(dev_data);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
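/* Pipeline: map each element to a 0/1 flag, exclusive-scan the flags to get
output indices, then scatter the non-zero elements to those indices. */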
int compact(int n, int *odata, const int *idata)
{
int * dev_data;
int * dev_booleans;
int * dev_data_output;
int passes = ilog2ceil(n);
int squareN = pow(2, passes);
hipMalloc((void**)&dev_data_output, squareN * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_data failed!");
hipMalloc((void**)&dev_booleans, squareN * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_data failed!");
hipMalloc((void**)&dev_data, squareN * sizeof(int));
checkCUDAErrorFn("hipMalloc dev_data failed!");
// zero-initialize the padded buffers (calloc-style)
hipMemset(dev_data_output, 0, squareN * sizeof(int));
hipMemset(dev_data, 0, squareN * sizeof(int));
hipMemcpy(dev_data, idata, sizeof(int) * n, hipMemcpyHostToDevice);
checkCUDAErrorFn("hipMemcpy dev_data failed!");
timer().startGpuTimer();
dim3 blocks((squareN + blockSize - 1) / blockSize);
StreamCompaction::Common::kernMapToBoolean << <blocks, blockSize >> > (squareN, dev_booleans, dev_data);
checkCUDAErrorFn("kernMapToBoolean failed!");
// the scan is exclusive, so save the last flag before it is overwritten
int lastFlag = 0;
hipMemcpy(&lastFlag, &dev_booleans[squareN-1], sizeof(int), hipMemcpyDeviceToHost);
scan(squareN, dev_booleans);
int sum = 0;
hipMemcpy(&sum, &dev_booleans[squareN-1], sizeof(int), hipMemcpyDeviceToHost);
sum += lastFlag;
// Note: I removed one of the input arrays
StreamCompaction::Common::kernScatter << <blocks, blockSize >> > (squareN, dev_data_output, dev_data, dev_booleans);
checkCUDAErrorFn("kernScatter failed!");
timer().endGpuTimer();
hipMemcpy(odata, dev_data_output, sizeof(int) * sum, hipMemcpyDeviceToHost);
checkCUDAErrorFn("hipMemcpy dev_booleans failed!");
hipFree(dev_data);
hipFree(dev_data_output);
hipFree(dev_booleans);
return sum;
}
}
}
| 81095133018984d419a80e02c3b0b77cc46c2e5e.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
#define blockSize 128
__global__ void kernUpSweep(int N, int stride, int halfStride, int * data)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N)
return;
index = (index + 1) * stride - 1;
data[index] += data[index - halfStride];
}
__global__ void kernDownSweepFirst(int N, int stride, int halfStride, int * data)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N)
return;
index = (index + 1) * stride - 1;
int tmp = data[index - halfStride];
// fused first level: the root is implicitly zeroed, so the left child takes 0
data[index - halfStride] = 0;
// and the root takes the left child's old partial sum (0 + tmp)
data[index] = tmp;
}
__global__ void kernDownSweep(int N, int stride, int halfStride, int * data)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N)
return;
index = (index + 1) * stride - 1;
int value = data[index];
int tmp = data[index - halfStride];
// swap: the left child takes the value passed down from the parent
data[index - halfStride] = value;
// the parent position takes its value plus the left child's old partial sum
data[index] = value + tmp;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
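/* Work-efficient (Blelloch) scan: the up-sweep builds partial sums in a
balanced tree, then the down-sweep (whose first level zeroes the root)
converts them into an exclusive prefix sum. n must be a power of two. */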
void scan(int n, int *dev_data)
{
int passes = ilog2ceil(n);
for (int d = 0; d < passes; ++d)
{
int stride = pow(2, d + 1);
int halfStride = stride / 2;
int sliceElements = n / stride;
//printf("%d, %d, %d \n", sliceElements, stride, halfStride);
dim3 blocks((sliceElements + blockSize - 1) / blockSize);
kernUpSweep << <blocks, blockSize >> > (sliceElements, stride, halfStride, dev_data);
checkCUDAErrorFn("kernUpSweep failed!");
}
for (int d = passes - 1; d >= 0; --d)
{
int stride = pow(2, d + 1);
int halfStride = stride / 2;
int sliceElements = n / stride;
//printf("%d, %d, %d \n", sliceElements, stride, halfStride);
dim3 blocks((sliceElements + blockSize - 1) / blockSize);
if (d == passes - 1)
{
kernDownSweepFirst << <blocks, blockSize >> > (sliceElements, stride, halfStride, dev_data);
checkCUDAErrorFn("kernDownSweepFirst failed!");
}
else
{
kernDownSweep << <blocks, blockSize >> > (sliceElements, stride, halfStride, dev_data);
checkCUDAErrorFn("kernDownSweep failed!");
}
}
}
void scan(int n, int *odata, const int *idata)
{
int * dev_data;
int passes = ilog2ceil(n);
int squareN = pow(2, passes);
//printf("%d vs %d\n", n, squareN);
cudaMalloc((void**)&dev_data, squareN * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_data failed!");
// zero-initialize the padded buffer (calloc-style)
cudaMemset(dev_data, 0, squareN * sizeof(int));
cudaMemcpy(dev_data, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy dev_data failed!");
timer().startGpuTimer();
scan(squareN, dev_data);
timer().endGpuTimer();
cudaMemcpy(odata, dev_data, sizeof(int) * n, cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy dev_data failed!");
cudaFree(dev_data);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
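/* Pipeline: map each element to a 0/1 flag, exclusive-scan the flags to get
output indices, then scatter the non-zero elements to those indices. */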
int compact(int n, int *odata, const int *idata)
{
int * dev_data;
int * dev_booleans;
int * dev_data_output;
int passes = ilog2ceil(n);
int squareN = pow(2, passes);
cudaMalloc((void**)&dev_data_output, squareN * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_data failed!");
cudaMalloc((void**)&dev_booleans, squareN * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_data failed!");
cudaMalloc((void**)&dev_data, squareN * sizeof(int));
checkCUDAErrorFn("cudaMalloc dev_data failed!");
// zero-initialize the padded buffers (calloc-style)
cudaMemset(dev_data_output, 0, squareN * sizeof(int));
cudaMemset(dev_data, 0, squareN * sizeof(int));
cudaMemcpy(dev_data, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
checkCUDAErrorFn("cudaMemcpy dev_data failed!");
timer().startGpuTimer();
dim3 blocks((squareN + blockSize - 1) / blockSize);
StreamCompaction::Common::kernMapToBoolean << <blocks, blockSize >> > (squareN, dev_booleans, dev_data);
checkCUDAErrorFn("kernMapToBoolean failed!");
// the scan is exclusive, so save the last flag before it is overwritten
int lastFlag = 0;
cudaMemcpy(&lastFlag, &dev_booleans[squareN-1], sizeof(int), cudaMemcpyDeviceToHost);
scan(squareN, dev_booleans);
int sum = 0;
cudaMemcpy(&sum, &dev_booleans[squareN-1], sizeof(int), cudaMemcpyDeviceToHost);
sum += lastFlag;
// Note: I removed one of the input arrays
StreamCompaction::Common::kernScatter << <blocks, blockSize >> > (squareN, dev_data_output, dev_data, dev_booleans);
checkCUDAErrorFn("kernScatter failed!");
timer().endGpuTimer();
cudaMemcpy(odata, dev_data_output, sizeof(int) * sum, cudaMemcpyDeviceToHost);
checkCUDAErrorFn("cudaMemcpy dev_booleans failed!");
cudaFree(dev_data);
cudaFree(dev_data_output);
cudaFree(dev_booleans);
return sum;
}
}
}
|
2637670e9e4a14870e360a6826753a085dd43761.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if(error != hipSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \
exit(1); \
} \
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8; // tolerance of 1e-8
bool match = 1;
for(int i = 0; i < N; i++) {
if(fabs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if(match) printf("Arrays match.\n");
}
void initialData(float *ip, int size) {
// generate different seed for random number
time_t t;
srand((unsigned)time(&t));
for(int i = 0; i < size; i++) {
ip[i] = (float)(rand() & 0xFF)/10.0f; // 0-25.5
}
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for(int idx = 0; idx < N; idx++) {
C[idx] = A[idx] + B[idx];
}
}
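/* one thread per element; with nElem <= 1024 the launch below uses a single
block, so threadIdx.x alone indexes the whole vector */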
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char **argv) {
printf("%s Starting...\n", argv[0])
// set up device
int dev = 0;
hipSetDevice(dev);
// set up data size of vectors
int nElem = 32;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
hipMalloc((float **)&d_A, nBytes);
hipMalloc((float **)&d_B, nBytes);
hipMalloc((float **)&d_C, nBytes);
// transfer data from host to device
hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice);
// invoke kernel at host side
dim3 block(nElem); // number of threads per block
dim3 grid(nElem/block.x); // number of blocks in the grid
// the vector has 32 elements, so the whole launch fits in a single block of 32 threads
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid),dim3(block), 0, 0, d_A, d_B, d_C);
printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
// copy kernel result back to host side
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
} | 2637670e9e4a14870e360a6826753a085dd43761.cu | #include <cuda_runtime.h>
#include <stdio.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
void checkResult(float *hostRef, float *gpuRef, const int N) {
double epsilon = 1.0E-8; // tolerance of 1e-8
bool match = 1;
for(int i = 0; i < N; i++) {
if(fabs(hostRef[i] - gpuRef[i]) > epsilon) {
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i);
break;
}
}
if(match) printf("Arrays match.\n");
}
void initialData(float *ip, int size) {
// generate different seed for random number
time_t t;
srand((unsigned)time(&t));
for(int i = 0; i < size; i++) {
ip[i] = (float)(rand() & 0xFF)/10.0f; // 0-25.5
}
}
void sumArraysOnHost(float *A, float *B, float *C, const int N) {
for(int idx = 0; idx < N; idx++) {
C[idx] = A[idx] + B[idx];
}
}
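/* one thread per element; with nElem <= 1024 the launch below uses a single
block, so threadIdx.x alone indexes the whole vector */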
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
int i = threadIdx.x;
C[i] = A[i] + B[i];
}
int main(int argc, char **argv) {
printf("%s Starting...\n", argv[0])
// set up device
int dev = 0;
cudaSetDevice(dev);
// set up data size of vectors
int nElem = 32;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((float **)&d_A, nBytes);
cudaMalloc((float **)&d_B, nBytes);
cudaMalloc((float **)&d_C, nBytes);
// transfer data from host to device
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
// invoke kernel at host side
dim3 block(nElem); // number of threads per block
dim3 grid(nElem/block.x); // number of blocks in the grid
// the vector has 32 elements, so the whole launch fits in a single block of 32 threads
sumArraysOnGPU<<<grid,block>>>(d_A, d_B, d_C);
printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
// copy kernel result back to host side
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return 0;
} |
2e9578569ddfccf317b3cfe2889e01e2faaadf87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <string>
#include <stdlib.h>
#include <new>
#define SIZE 300000*4
using namespace std;
__global__ void kMartixByMatrixElementwise(const int nThreads, const float *m1, const float *m2, float *output) {
/* Computes the element-wise product of two arrays.
Returns an array where the computed elements are stored.
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = m1[i] * m2[i];
}
}
__device__ float* dMartixByMatrixElementwise(const float *m1, const float *m2, float *output, const int width, const int height){
hipLaunchKernelGGL(( kMartixByMatrixElementwise) , dim3(width), dim3(height) , 0, 0, width * height, m1, m2, output );
//kMartixByMatrixElementwise <<< width/8, height/8 >>> ( width * height, m1, m2, output );
hipDeviceSynchronize();
return output;
}
__global__ void kMartixSubstractMatrix(const int nThreads, const float *m1, const float *m2, float *output) {
//Computes the element-wise difference between two arrays
//Returns an array where the computed elements are stored
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = m1[i] - m2[i];
}
}
__device__ float* dMartixSubstractMatrix(const float *m1, const float *m2, float *output, const int width, const int height){
//m1, m2: input buffers; output: element-wise m1 - m2
//width, height: matrix dimensions
hipLaunchKernelGGL(( kMartixSubstractMatrix) , dim3(width), dim3(height) , 0, 0, width * height, m1, m2, output );
//kMartixSubstractMatrix <<< width/8, height/8 >>> ( width * height, m1, m2, output );
hipDeviceSynchronize();
return output;
}
__global__ void kSigmoid(const int nThreads, float const *input, float *output){
/* computes the sigmoid function f(x) = 1/(1 + e^-x).
*/
//nThreads: number of records x number of hidden neurons
//input: layer buffer
//output: layer buffer
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = 1.0 / (1.0 + ::exp(-input[i]));
}
}
__device__ void dSigmoid(float const *input, float *output, const int height, const int width){
//input: result of dDot(m1*m2)
//output: layer-1 buffer
//height: number of records
//width: number of hidden neurons
hipLaunchKernelGGL(( kSigmoid) , dim3(height), dim3(width) , 0, 0, height * width, input, output);
//kSigmoid <<< height/8, width/8 >>> (height * width, input, output);
hipDeviceSynchronize();
}
__global__ void kSigmoid_d(const int nThreads, float const *input, float *output) {
/* computes the derivative of the sigmoid function f'(x) = f(x)(1 - f(x)),
output: array storing x(1 - x) for each element of the input matrix m1
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = input[i] * (1 - input[i]);
}
}
__device__ float* dSigmoid_d(float const *input, float *output, const int rows, const int columns){
hipLaunchKernelGGL(( kSigmoid_d) , dim3(rows), dim3(columns) , 0, 0, rows*columns, input, output);
//kSigmoid_d <<< rows/8, columns/8>>> (rows*columns, input, output);
hipDeviceSynchronize();
return output;
}
__global__ void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ){
//Computes the product of the two input matrices m1 and m2,
//output: m1*m2
//nThreads: number of output elements (rows x columns)
//m1: layer-1 buffer
//m2: hidden-layer weights
//output: prediction buffer
//m1_rows: number of records
//m1_columns: number of hidden neurons
//m2_columns: number of expected values
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
int r = (int)i / m2_columns;
int c = i % m2_columns;
float t_output = 0.f;
for( int k = 0; k < m1_columns; ++k ) {
t_output += m1[ r * m1_columns + k ] * m2[ k * m2_columns + c ];
}
output[i] = t_output;
}
}
__device__ float* dDot(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ){
//m1: layer-1 buffer
//m2: hidden-layer weights
//output: prediction buffer
//m1_rows: number of records
//m1_columns: number of hidden neurons
//m2_columns: number of expected values (1)
//called as dDot(l1, W1, pred, X_h, l1_w, y_w)
hipLaunchKernelGGL(( kDot) , dim3(m1_rows), dim3(m2_columns) , 0, 0, m1_rows * m2_columns, m1, m2, output, m1_rows , m1_columns, m2_columns );
//kDot <<< m1_rows/8, m2_columns/8>>> (m1_rows * m2_columns, m1, m2, output, m1_rows , m1_columns, m2_columns );
hipDeviceSynchronize();
return output;
}
__global__ void kDot_m1_m2T(const int nThreads, const float *m1, const float *m2, float *output, const int m1_columns, const int m2_rows ){
//updates the output with the product of m1 and the transpose of m2
//output: product of the two arrays
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
int r = (int)i / m2_rows;
int c = i % m2_rows;
float t_output = 0.0;
int id_T;
for( int k = 0; k < m1_columns; ++k ) {
id_T = c * m1_columns + k;
t_output += m1[ r * m1_columns + k ] * m2[ id_T ];
}
output[i] = t_output;
}
}
__device__ float* dDot_m1_m2T(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_rows )
{
hipLaunchKernelGGL(( kDot_m1_m2T) , dim3(m1_rows), dim3(m2_rows) , 0, 0, m1_rows * m2_rows, m1, m2, output, m1_columns, m2_rows );
//kDot_m1_m2T <<< m1_rows/8, m2_rows/8 >>> ( m1_rows * m2_rows, m1, m2, output, m1_columns, m2_rows );
hipDeviceSynchronize();
return output;
}
__global__ void kDot_m1T_m2(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows,
const int m1_columns, const int m2_columns ){
    //Accumulates into the output matrix the product of two matrices: m1 transposed with m2
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
int r = (int)i / m2_columns;
int c = i % m2_columns;
int id_T;
float t_output = 0.0;
for( int k = 0; k < m1_rows; ++k ) {
id_T = k * m1_columns + r;
t_output += m1[ id_T ] * m2[ k * m2_columns + c ];
}
output[i] += t_output;
}
}
__device__ void dDot_m1T_m2(const float *m1, const float *m2, float *output, const int m1_height , const int m1_width, const int m2_width )
{
hipLaunchKernelGGL(( kDot_m1T_m2) , dim3(m1_width), dim3(m2_width) , 0, 0, m1_width * m2_width, m1, m2, output, m1_height, m1_width, m2_width );
//kDot_m1T_m2 <<< m1_width/8, m2_width/8 >>> (m1_width * m2_width, m1, m2, output, m1_height, m1_width, m2_width );
hipDeviceSynchronize();
}
__device__ void kPrintMatrix (const float* M, int h, int w) {
    // prints an h x w matrix
for (int i = 0; i < h; i++){
for (int j = 0; j < w; j++){
printf("%f ", M[i*w+j]);
}
printf("\n");
}
printf("\n");
}
__global__ void ktrain( const float* X, const int X_w, const int X_h,
const float* y, const int y_w,
float* l1, const int l1_w, float* l_1_d,
float* pred, float* pred_d,
float* W0,
float* W1,
float* buffer
)
{
    for (unsigned i = 0; i < 50; ++i) {//number of epochs
        //X: size of the input values
        //X_w: number of inputs
        //X_h: number of logs
        //y: size of the expected values
        //y_w: (1) number of expected values
        //l1: size for layer 1
        //l1_w: number of hidden neurons
        //l_1_d: size of the layer-1 derivative
        //pred: size of the prediction values
        //pred_d: derivative of the prediction values
        //W0: size of the initial weights
        //W1: size of the hidden-layer weights
        //buffer: size of the output values
        dSigmoid(dDot(X, W0, l1, X_h, X_w, l1_w), l1, X_h, l1_w);// layer 1
        dSigmoid(dDot(l1, W1, pred, X_h, l1_w, y_w), pred, X_h, y_w);// layer 2
dMartixByMatrixElementwise(dMartixSubstractMatrix(y, pred, pred_d, X_h, y_w), dSigmoid_d(pred, buffer, X_h, y_w), pred_d, X_h, y_w );
dMartixByMatrixElementwise(dDot_m1_m2T(pred_d, W1, l_1_d, X_h, y_w, l1_w), dSigmoid_d(l1, buffer, X_h, l1_w), l_1_d, X_h, l1_w);
dDot_m1T_m2( l1, pred_d, W1, X_h, l1_w, y_w );
dDot_m1T_m2( X, l_1_d, W0, X_h, X_w, l1_w );
}
}
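// Note: ktrain launches child kernels from device code (through dSigmoid, dDot and
// the elementwise helpers), which requires dynamic parallelism support (on CUDA
// hardware: compute capability >= 3.5, built with relocatable device code, e.g.
// -rdc=true). That is presumably why the host side launches ktrain with a
// single-thread grid.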
__host__ float * read(){
FILE *archivo;
// float *array = malloc(sizeof(float)*SIZE);
float *array = new float[SIZE];
// static float array[SIZE];
    int i; // fscanf returns an int: number of items matched, or EOF
float n;
archivo = fopen("oversample.txt","rt");
i = fscanf(archivo,"%f, ",&n);
int k = 0;
while(i != EOF){
if(k == SIZE) break;
array[k] = n;
        i = fscanf(archivo,"%f, ",&n); // refresh the EOF check each iteration
k++;
}
printf("%d\n",k );
fclose(archivo);
return array;
}
__host__ float * data_range(int begin, int end , float * array){
// float * arr = malloc((end-begin)*sizeof(float));
//int local_size = end - begin;
//static float arr[local_size];
float * arr=new float[end-begin];
int i = 0;
for(int k = begin; k < end; k++ ){
arr[i] = array[k];
// printf("%f \n",arr[i] );
i++;
}
return arr;
}
int main(void){
    const int TRAINING_SIZE = 30000;//number of logs
    const int TRAINING_DIM = 4; //number of variables
    int L1_SIZE=4; //number of neurons
    clock_t a,b;
    //event declarations
hipEvent_t start;
hipEvent_t stop;
float tiempo;
float *data = read();
int inicio = 0*4, fin = 30000*4;
float *h_X = data_range(inicio, fin, data);
/*for(int i = 0; i < 100; i++){
printf("%d \n",data[i]);
}*/
/*for(int i=0;i<22;i++){
printf("%f\n",h_X[i]);
}*/
int i=0;
while(i<=10){
i++;
L1_SIZE=i*10;
// float h_X;
tiempo=0;
//scanf("%i\n",&L1_SIZE );
a=clock();
        //event creation
hipEventCreate(&start);
hipEventCreate(&stop);
/* float h_X[TRAINING_SIZE*TRAINING_DIM] = {
5.1,3.5,1.4,0.2,
4.9,3.0,1.4,0.2,
4.7,3.2,1.3,0.2,
4.6,3.1,1.5,0.2,
5.0,3.6,1.4,0.2,
5.4,3.9,1.7,0.4,
        4.6,3.4,1.4,0.3,
5.0,3.4,1.5,0.2,
4.4,2.9,1.4,0.2,
4.9,3.1,1.5,0.1,
5.4,3.7,1.5,0.2,
4.8,3.4,1.6,0.2,
4.8,3.0,1.4,0.1,
4.3,3.0,1.1,0.1,
5.8,4.0,1.2,0.2,
5.7,4.4,1.5,0.4,
5.4,3.9,1.3,0.4,
5.1,3.5,1.4,0.3,
5.7,3.8,1.7,0.3,
5.1,3.8,1.5,0.3,
5.4,3.4,1.7,0.2,
5.1,3.7,1.5,0.4,
4.6,3.6,1.0,0.2,
5.1,3.3,1.7,0.5,
4.8,3.4,1.9,0.2,
5.0,3.0,1.6,0.2,
5.0,3.4,1.6,0.4,
        5.2,3.5,1.5,0.2,
5.2,3.4,1.4,0.2,
4.7,3.2,1.6,0.2,
4.8,3.1,1.6,0.2,
5.4,3.4,1.5,0.4,
5.2,4.1,1.5,0.1,
5.5,4.2,1.4,0.2,
4.9,3.1,1.5,0.1,
5.0,3.2,1.2,0.2,
5.5,3.5,1.3,0.2,
4.9,3.1,1.5,0.1,
4.4,3.0,1.3,0.2,
5.1,3.4,1.5,0.2,
5.0,3.5,1.3,0.3,
4.5,2.3,1.3,0.3,
4.4,3.2,1.3,0.2,
5.0,3.5,1.6,0.6,
5.1,3.8,1.9,0.4,
4.8,3.0,1.4,0.3,
5.1,3.8,1.6,0.2,
4.6,3.2,1.4,0.2,
5.3,3.7,1.5,0.2,
5.0,3.3,1.4,0.2,
//---
//5.4,3.9,1.3,0.4,
//5.1,3.5,1.4,0.3,
//5.7,3.8,1.7,0.3,
//5.1,3.8,1.5,0.3,
//------------------
/* 7.4,2.8,6.1,1.9,
7.9,3.8,6.4,2.0,
6.4,2.8,5.6,2.2,
6.3,2.8,5.1,1.5,
6.1,2.6,5.6,1.4,
7.7,3.0,6.1,2.3,
6.3,3.4,5.6,2.4,
6.4,3.1,5.5,1.8,
6.0,3.0,4.8,1.8,
6.9,3.1,5.4,2.1,
6.7,3.1,5.6,2.4,
6.9,3.1,5.1,2.3,
5.8,2.7,5.1,1.9,
6.8,3.2,5.9,2.3,
6.7,3.3,5.7,2.5,
6.7,3.0,5.2,2.3,
6.3,2.5,5.0,1.9,
6.2,3.4,5.4,2.3,
5.9,3.0,5.1,1.8,
6.5,3.0,5.2,2.0
};*/
    // h_X is a heap pointer here (not the commented-out stack array above), so
    // sizeof(h_X) would only give the pointer size; compute the byte count explicitly.
    const long signed int X_size = TRAINING_SIZE*TRAINING_DIM*sizeof(float);
    //printf("size %li\n",X_size );
float *d_X;
    //mark start
    hipEventRecord(start,0);
    //allocate a device pointer d_X, where X_size is the byte size of the data (inputs x logs)
hipMalloc(&d_X,X_size);
hipMemcpy(d_X,h_X,X_size,hipMemcpyHostToDevice);
    //byte size of the weights allocated in device memory
    const long signed int W0_size = L1_SIZE*TRAINING_DIM*sizeof(float);
    //printf("weight 1 %li\n",W0_size);
    //size of the weights
float *h_W0 = (float*)malloc(W0_size);
for (int i = 0; i < L1_SIZE*TRAINING_DIM; i++){
        //fill with random elements
//h_W0[i] = 0.5;
h_W0[i] = 0.1 * (2.0*rand()/RAND_MAX-1.0);
}
float *d_W0;
    //allocate device memory of the weights' size
hipMalloc(&d_W0, W0_size);
hipMemcpy(d_W0, h_W0, W0_size, hipMemcpyHostToDevice);
//LAYER_1, LAYER_1_DELTA AND BUFFER OF LAYER 1 SIZE
const long signed int L1_size = L1_SIZE*TRAINING_SIZE*sizeof(float);
float* h_layer_1 = (float*)malloc(L1_size);
float* h_layer_1_delta = (float*)malloc(L1_size);
float* h_buffer = (float*)malloc(L1_size);
for (int i = 0; i < L1_SIZE*TRAINING_SIZE; i++){
h_layer_1[i] = 0.0;
h_buffer[i] = 0.0;
h_layer_1_delta[i] = 0.0;
}
    //Create and copy device memory for the layer
float *d_layer_1;
hipMalloc(&d_layer_1, L1_size);
hipMemcpy(d_layer_1, h_layer_1, L1_size, hipMemcpyHostToDevice);
    //Create and copy device memory for the buffer
float *d_buffer;
hipMalloc(&d_buffer, L1_size);
hipMemcpy(d_buffer, h_buffer, L1_size, hipMemcpyHostToDevice);
    //Create and copy device memory for the layer-1 delta (derivative)
float *d_layer_1_delta;
hipMalloc(&d_layer_1_delta, L1_size);
hipMemcpy(d_layer_1_delta, h_layer_1_delta, L1_size, hipMemcpyHostToDevice);
    //WEIGHTS 1
const long signed int W1_size = L1_SIZE*sizeof(float);
float *h_W1 = (float*)malloc(W1_size);
for (int i = 0; i < L1_SIZE; i++){
h_W1[i] = 0.1* (2.0*rand()/RAND_MAX-1.0);
//h_W1[i]=0.5;
}
float *d_W1;
hipMalloc(&d_W1, W1_size);
hipMemcpy(d_W1, h_W1, W1_size, hipMemcpyHostToDevice);
    //expected data (labels)
float h_y[30000];
for(int i=0;i<30000;i++){
if(i<30000){
h_y[i]=0;
}else{
h_y[i]=1;
}
}
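    // Note: as written, i < 30000 is always true inside this loop, so every
    // label is set to 0 and the else branch is dead code; the split was
    // presumably intended for a larger or differently partitioned label array.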
/*for(int i=0;i<22;i++){
printf("%f\n",h_y[i]);
}*/
const signed int y_size = sizeof(h_y);
float *d_y;
hipMalloc(&d_y, y_size);
hipMemcpy(d_y, h_y, y_size, hipMemcpyHostToDevice);
    //prediction and prediction delta
float* h_pred = (float*)malloc(y_size);
float* h_pred_delta = (float*)malloc(y_size);
for (int i = 0; i < TRAINING_SIZE; i++){
h_pred[i] = 0.0;
h_pred_delta[i] = 0.0;
}
float *d_pred;
hipMalloc(&d_pred, y_size);
hipMemcpy(d_pred, h_pred, y_size, hipMemcpyHostToDevice);
float *d_pred_delta;
hipMalloc(&d_pred_delta, y_size);
hipMemcpy(d_pred_delta, h_pred_delta, y_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( ktrain), dim3(1), dim3(1) , 0, 0, d_X, TRAINING_DIM, TRAINING_SIZE,
d_y, 3,
d_layer_1, L1_SIZE, d_layer_1_delta,
d_pred,
d_pred_delta,
d_W0,
d_W1,
d_buffer);
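    // Note: y_w is passed as 3 here although the dDot comments describe it as 1
    // and h_y/h_pred hold a single value per log; this looks like a leftover
    // from an earlier multi-output configuration.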
hipMemcpy(h_pred, d_pred, y_size, hipMemcpyDeviceToHost);
hipFree(d_pred);
hipFree(d_X);
hipFree(d_y);
hipFree(d_layer_1_delta);
hipFree(d_pred_delta);
hipFree(d_W0);
hipFree(d_W1);
hipFree(d_buffer);
free(h_layer_1_delta);
free(h_pred_delta);
free(h_W0);
free(h_W1);
free(h_buffer);
    //mark end
    hipEventRecord(stop,0);
    //synchronize GPU with CPU
    hipEventSynchronize(stop);
    //compute elapsed time in milliseconds
    hipEventElapsedTime(&tiempo,start,stop);
    //Print results
    //release resources
hipEventDestroy(start);
hipEventDestroy(stop);
b=clock();
for (int i = 0; i < TRAINING_SIZE; i++){
/*if(i==9999 || i==9998 || i==9997 || i==19999 || i==19998 || i==19997 || i==29999 || i==29998 || i==29997){
printf("Prediccion[%i]: %f - valor real[%i]: %f - Error[%i]: %f\n", i, h_pred[i], i, h_y[i], i, h_pred[i] - h_y[i]);
}*/
}
free(h_pred);
printf(">Tiempo de ejecucin %f ms \n",tiempo);
//printf(">Tiempo de ejecucin cpu %i ms \n",b-a);
//getchar();
}//end while
}
| 2e9578569ddfccf317b3cfe2889e01e2faaadf87.cu |
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <string>
#include <stdlib.h>
#include <new>
#define SIZE 300000*4
using namespace std;
__global__ void kMartixByMatrixElementwise(const int nThreads, const float *m1, const float *m2, float *output) {
    /* Stores the elementwise product of two arrays.
       Returns the array where the computed elements are stored.
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = m1[i] * m2[i];
}
}
__device__ float* dMartixByMatrixElementwise(const float *m1, const float *m2, float *output, const int width, const int height){
kMartixByMatrixElementwise <<< width, height >>> ( width * height, m1, m2, output );
//kMartixByMatrixElementwise <<< width/8, height/8 >>> ( width * height, m1, m2, output );
cudaDeviceSynchronize();
return output;
}
__global__ void kMartixSubstractMatrix(const int nThreads, const float *m1, const float *m2, float *output) {
    //Computes the elementwise difference of two arrays
    //Returns the array where the computed elements are stored
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = m1[i] - m2[i];
}
}
__device__ float* dMartixSubstractMatrix(const float *m1, const float *m2, float *output, const int width, const int height){
    //m1, m2: input arrays; output receives m1 - m2 elementwise
    //width, height: dimensions of the matrices (launched as width blocks x height threads)
kMartixSubstractMatrix <<< width, height >>> ( width * height, m1, m2, output );
//kMartixSubstractMatrix <<< width/8, height/8 >>> ( width * height, m1, m2, output );
cudaDeviceSynchronize();
return output;
}
__global__ void kSigmoid(const int nThreads, float const *input, float *output){
    /* computes the sigmoid function f(x) = 1/(1 + e^-x).
    */
    //nThreads: number of inputs x number of hidden neurons
    //input: size for layer 1
    //output: size for layer 1
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = 1.0 / (1.0 + std::exp(-input[i]));
}
}
__device__ void dSigmoid(float const *input, float *output, const int height, const int width){
    //input: result of dDot(m1*m2)
    //output: size for layer 1
    //height: number of inputs
    //width: number of hidden neurons
kSigmoid <<< height, width >>> (height * width, input, output);
//kSigmoid <<< height/8, width/8 >>> (height * width, input, output);
cudaDeviceSynchronize();
}
__global__ void kSigmoid_d(const int nThreads, float const *input, float *output) {
    /* computes the derivative of the sigmoid function f'(x) = f(x)(1 - f(x)),
       output: array storing x(1 - x) for each element of the input matrix m1
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
output[i] = input[i] * (1 - input[i]);
}
}
__device__ float* dSigmoid_d(float const *input, float *output, const int rows, const int columns){
kSigmoid_d <<< rows, columns >>> (rows*columns, input, output);
//kSigmoid_d <<< rows/8, columns/8>>> (rows*columns, input, output);
cudaDeviceSynchronize();
return output;
}
__global__ void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ){
    //Computes the product of two matrices; m1 and m2 are the input arrays,
    //output: m1*m2
    //nThreads: number of outputs times number of inputs
    //m1: size for layer 1
    //m2: size for the hidden-layer weights
    //output: size of the prediction values
    //m1_rows: number of logs
    //m1_columns: number of hidden neurons
    //m2_columns: number of expected values
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
int r = (int)i / m2_columns;
int c = i % m2_columns;
float t_output = 0.f;
for( int k = 0; k < m1_columns; ++k ) {
t_output += m1[ r * m1_columns + k ] * m2[ k * m2_columns + c ];
}
output[i] = t_output;
}
}
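// A minimal worked example of the index mapping above (shapes assumed purely for
// illustration): with m1 of size 2x3 and m2 of size 3x2, thread i = 3 maps to
// r = 3 / m2_columns = 1 and c = 3 % m2_columns = 1, so it accumulates
// m1[1*3 + k] * m2[k*2 + 1] for k = 0..2, i.e. row 1, column 1 of the 2x2 result.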
__device__ float* dDot(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ){
    //m1: size for layer 1
    //m2: size of the hidden-layer weights
    //output: size of the prediction values
    //m1_rows: number of logs
    //m1_columns: number of hidden neurons
    //m2_columns: (1) number of expected values
    //called as dDot(l1, W1, pred, X_h, l1_w, y_w)
kDot <<< m1_rows, m2_columns >>> (m1_rows * m2_columns, m1, m2, output, m1_rows , m1_columns, m2_columns );
//kDot <<< m1_rows/8, m2_columns/8>>> (m1_rows * m2_columns, m1, m2, output, m1_rows , m1_columns, m2_columns );
cudaDeviceSynchronize();
return output;
}
__global__ void kDot_m1_m2T(const int nThreads, const float *m1, const float *m2, float *output, const int m1_columns, const int m2_rows ){
    //updates the output with the product of a matrix and a transposed matrix
    //output: product of the two arrays
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
int r = (int)i / m2_rows;
int c = i % m2_rows;
float t_output = 0.0;
int id_T;
for( int k = 0; k < m1_columns; ++k ) {
id_T = c * m1_columns + k;
t_output += m1[ r * m1_columns + k ] * m2[ id_T ];
}
output[i] = t_output;
}
}
__device__ float* dDot_m1_m2T(const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_rows )
{
kDot_m1_m2T <<< m1_rows, m2_rows >>> ( m1_rows * m2_rows, m1, m2, output, m1_columns, m2_rows );
//kDot_m1_m2T <<< m1_rows/8, m2_rows/8 >>> ( m1_rows * m2_rows, m1, m2, output, m1_columns, m2_rows );
cudaDeviceSynchronize();
return output;
}
__global__ void kDot_m1T_m2(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows,
const int m1_columns, const int m2_columns ){
    //Accumulates into the output matrix the product of two matrices: m1 transposed with m2
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
int r = (int)i / m2_columns;
int c = i % m2_columns;
int id_T;
float t_output = 0.0;
for( int k = 0; k < m1_rows; ++k ) {
id_T = k * m1_columns + r;
t_output += m1[ id_T ] * m2[ k * m2_columns + c ];
}
output[i] += t_output;
}
}
__device__ void dDot_m1T_m2(const float *m1, const float *m2, float *output, const int m1_height , const int m1_width, const int m2_width )
{
kDot_m1T_m2 <<< m1_width, m2_width >>> (m1_width * m2_width, m1, m2, output, m1_height, m1_width, m2_width );
//kDot_m1T_m2 <<< m1_width/8, m2_width/8 >>> (m1_width * m2_width, m1, m2, output, m1_height, m1_width, m2_width );
cudaDeviceSynchronize();
}
__device__ void kPrintMatrix (const float* M, int h, int w) {
    // prints an h x w matrix
for (int i = 0; i < h; i++){
for (int j = 0; j < w; j++){
printf("%f ", M[i*w+j]);
}
printf("\n");
}
printf("\n");
}
__global__ void ktrain( const float* X, const int X_w, const int X_h,
const float* y, const int y_w,
float* l1, const int l1_w, float* l_1_d,
float* pred, float* pred_d,
float* W0,
float* W1,
float* buffer
)
{
    for (unsigned i = 0; i < 50; ++i) {//number of epochs
        //X: size of the input values
        //X_w: number of inputs
        //X_h: number of logs
        //y: size of the expected values
        //y_w: (1) number of expected values
        //l1: size for layer 1
        //l1_w: number of hidden neurons
        //l_1_d: size of the layer-1 derivative
        //pred: size of the prediction values
        //pred_d: derivative of the prediction values
        //W0: size of the initial weights
        //W1: size of the hidden-layer weights
        //buffer: size of the output values
        dSigmoid(dDot(X, W0, l1, X_h, X_w, l1_w), l1, X_h, l1_w);// layer 1
        dSigmoid(dDot(l1, W1, pred, X_h, l1_w, y_w), pred, X_h, y_w);// layer 2
dMartixByMatrixElementwise(dMartixSubstractMatrix(y, pred, pred_d, X_h, y_w), dSigmoid_d(pred, buffer, X_h, y_w), pred_d, X_h, y_w );
dMartixByMatrixElementwise(dDot_m1_m2T(pred_d, W1, l_1_d, X_h, y_w, l1_w), dSigmoid_d(l1, buffer, X_h, l1_w), l_1_d, X_h, l1_w);
dDot_m1T_m2( l1, pred_d, W1, X_h, l1_w, y_w );
dDot_m1T_m2( X, l_1_d, W0, X_h, X_w, l1_w );
}
}
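// Note: ktrain launches child kernels from device code (through dSigmoid, dDot and
// the elementwise helpers), which requires dynamic parallelism support (on CUDA
// hardware: compute capability >= 3.5, built with relocatable device code, e.g.
// -rdc=true). That is presumably why the host side launches ktrain with a
// single-thread grid.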
__host__ float * read(){
FILE *archivo;
// float *array = malloc(sizeof(float)*SIZE);
float *array = new float[SIZE];
// static float array[SIZE];
    int i; // fscanf returns an int: number of items matched, or EOF
float n;
archivo = fopen("oversample.txt","rt");
i = fscanf(archivo,"%f, ",&n);
int k = 0;
while(i != EOF){
if(k == SIZE) break;
array[k] = n;
        i = fscanf(archivo,"%f, ",&n); // refresh the EOF check each iteration
k++;
}
printf("%d\n",k );
fclose(archivo);
return array;
}
__host__ float * data_range(int begin, int end , float * array){
// float * arr = malloc((end-begin)*sizeof(float));
//int local_size = end - begin;
//static float arr[local_size];
float * arr=new float[end-begin];
int i = 0;
for(int k = begin; k < end; k++ ){
arr[i] = array[k];
// printf("%f \n",arr[i] );
i++;
}
return arr;
}
int main(void){
    const int TRAINING_SIZE = 30000;//number of logs
    const int TRAINING_DIM = 4; //number of variables
    int L1_SIZE=4; //number of neurons
    clock_t a,b;
    //event declarations
cudaEvent_t start;
cudaEvent_t stop;
float tiempo;
float *data = read();
int inicio = 0*4, fin = 30000*4;
float *h_X = data_range(inicio, fin, data);
/*for(int i = 0; i < 100; i++){
printf("%d \n",data[i]);
}*/
/*for(int i=0;i<22;i++){
printf("%f\n",h_X[i]);
}*/
int i=0;
while(i<=10){
i++;
L1_SIZE=i*10;
// float h_X;
tiempo=0;
//scanf("%i\n",&L1_SIZE );
a=clock();
        //event creation
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* float h_X[TRAINING_SIZE*TRAINING_DIM] = {
5.1,3.5,1.4,0.2,
4.9,3.0,1.4,0.2,
4.7,3.2,1.3,0.2,
4.6,3.1,1.5,0.2,
5.0,3.6,1.4,0.2,
5.4,3.9,1.7,0.4,
        4.6,3.4,1.4,0.3,
5.0,3.4,1.5,0.2,
4.4,2.9,1.4,0.2,
4.9,3.1,1.5,0.1,
5.4,3.7,1.5,0.2,
4.8,3.4,1.6,0.2,
4.8,3.0,1.4,0.1,
4.3,3.0,1.1,0.1,
5.8,4.0,1.2,0.2,
5.7,4.4,1.5,0.4,
5.4,3.9,1.3,0.4,
5.1,3.5,1.4,0.3,
5.7,3.8,1.7,0.3,
5.1,3.8,1.5,0.3,
5.4,3.4,1.7,0.2,
5.1,3.7,1.5,0.4,
4.6,3.6,1.0,0.2,
5.1,3.3,1.7,0.5,
4.8,3.4,1.9,0.2,
5.0,3.0,1.6,0.2,
5.0,3.4,1.6,0.4,
        5.2,3.5,1.5,0.2,
5.2,3.4,1.4,0.2,
4.7,3.2,1.6,0.2,
4.8,3.1,1.6,0.2,
5.4,3.4,1.5,0.4,
5.2,4.1,1.5,0.1,
5.5,4.2,1.4,0.2,
4.9,3.1,1.5,0.1,
5.0,3.2,1.2,0.2,
5.5,3.5,1.3,0.2,
4.9,3.1,1.5,0.1,
4.4,3.0,1.3,0.2,
5.1,3.4,1.5,0.2,
5.0,3.5,1.3,0.3,
4.5,2.3,1.3,0.3,
4.4,3.2,1.3,0.2,
5.0,3.5,1.6,0.6,
5.1,3.8,1.9,0.4,
4.8,3.0,1.4,0.3,
5.1,3.8,1.6,0.2,
4.6,3.2,1.4,0.2,
5.3,3.7,1.5,0.2,
5.0,3.3,1.4,0.2,
//---
//5.4,3.9,1.3,0.4,
//5.1,3.5,1.4,0.3,
//5.7,3.8,1.7,0.3,
//5.1,3.8,1.5,0.3,
//------------------
/* 7.4,2.8,6.1,1.9,
7.9,3.8,6.4,2.0,
6.4,2.8,5.6,2.2,
6.3,2.8,5.1,1.5,
6.1,2.6,5.6,1.4,
7.7,3.0,6.1,2.3,
6.3,3.4,5.6,2.4,
6.4,3.1,5.5,1.8,
6.0,3.0,4.8,1.8,
6.9,3.1,5.4,2.1,
6.7,3.1,5.6,2.4,
6.9,3.1,5.1,2.3,
5.8,2.7,5.1,1.9,
6.8,3.2,5.9,2.3,
6.7,3.3,5.7,2.5,
6.7,3.0,5.2,2.3,
6.3,2.5,5.0,1.9,
6.2,3.4,5.4,2.3,
5.9,3.0,5.1,1.8,
6.5,3.0,5.2,2.0
};*/
    // h_X is a heap pointer here (not the commented-out stack array above), so
    // sizeof(h_X) would only give the pointer size; compute the byte count explicitly.
    const long signed int X_size = TRAINING_SIZE*TRAINING_DIM*sizeof(float);
    //printf("size %li\n",X_size );
float *d_X;
    //mark start
    cudaEventRecord(start,0);
    //allocate a device pointer d_X, where X_size is the byte size of the data (inputs x logs)
cudaMalloc(&d_X,X_size);
cudaMemcpy(d_X,h_X,X_size,cudaMemcpyHostToDevice);
    //byte size of the weights allocated in device memory
    const long signed int W0_size = L1_SIZE*TRAINING_DIM*sizeof(float);
    //printf("weight 1 %li\n",W0_size);
    //size of the weights
float *h_W0 = (float*)malloc(W0_size);
for (int i = 0; i < L1_SIZE*TRAINING_DIM; i++){
        //fill with random elements
//h_W0[i] = 0.5;
h_W0[i] = 0.1 * (2.0*rand()/RAND_MAX-1.0);
}
float *d_W0;
    //allocate device memory of the weights' size
cudaMalloc(&d_W0, W0_size);
cudaMemcpy(d_W0, h_W0, W0_size, cudaMemcpyHostToDevice);
//LAYER_1, LAYER_1_DELTA AND BUFFER OF LAYER 1 SIZE
const long signed int L1_size = L1_SIZE*TRAINING_SIZE*sizeof(float);
float* h_layer_1 = (float*)malloc(L1_size);
float* h_layer_1_delta = (float*)malloc(L1_size);
float* h_buffer = (float*)malloc(L1_size);
for (int i = 0; i < L1_SIZE*TRAINING_SIZE; i++){
h_layer_1[i] = 0.0;
h_buffer[i] = 0.0;
h_layer_1_delta[i] = 0.0;
}
    //Create and copy device memory for the layer
float *d_layer_1;
cudaMalloc(&d_layer_1, L1_size);
cudaMemcpy(d_layer_1, h_layer_1, L1_size, cudaMemcpyHostToDevice);
    //Create and copy device memory for the buffer
float *d_buffer;
cudaMalloc(&d_buffer, L1_size);
cudaMemcpy(d_buffer, h_buffer, L1_size, cudaMemcpyHostToDevice);
    //Create and copy device memory for the layer-1 delta (derivative)
float *d_layer_1_delta;
cudaMalloc(&d_layer_1_delta, L1_size);
cudaMemcpy(d_layer_1_delta, h_layer_1_delta, L1_size, cudaMemcpyHostToDevice);
    //WEIGHTS 1
const long signed int W1_size = L1_SIZE*sizeof(float);
float *h_W1 = (float*)malloc(W1_size);
for (int i = 0; i < L1_SIZE; i++){
h_W1[i] = 0.1* (2.0*rand()/RAND_MAX-1.0);
//h_W1[i]=0.5;
}
float *d_W1;
cudaMalloc(&d_W1, W1_size);
cudaMemcpy(d_W1, h_W1, W1_size, cudaMemcpyHostToDevice);
    //expected data (labels)
float h_y[30000];
for(int i=0;i<30000;i++){
if(i<30000){
h_y[i]=0;
}else{
h_y[i]=1;
}
}
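    // Note: as written, i < 30000 is always true inside this loop, so every
    // label is set to 0 and the else branch is dead code; the split was
    // presumably intended for a larger or differently partitioned label array.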
/*for(int i=0;i<22;i++){
printf("%f\n",h_y[i]);
}*/
const signed int y_size = sizeof(h_y);
float *d_y;
cudaMalloc(&d_y, y_size);
cudaMemcpy(d_y, h_y, y_size, cudaMemcpyHostToDevice);
    //prediction and prediction delta
float* h_pred = (float*)malloc(y_size);
float* h_pred_delta = (float*)malloc(y_size);
for (int i = 0; i < TRAINING_SIZE; i++){
h_pred[i] = 0.0;
h_pred_delta[i] = 0.0;
}
float *d_pred;
cudaMalloc(&d_pred, y_size);
cudaMemcpy(d_pred, h_pred, y_size, cudaMemcpyHostToDevice);
float *d_pred_delta;
cudaMalloc(&d_pred_delta, y_size);
cudaMemcpy(d_pred_delta, h_pred_delta, y_size, cudaMemcpyHostToDevice);
ktrain<<< 1, 1 >>> ( d_X, TRAINING_DIM, TRAINING_SIZE,
d_y, 3,
d_layer_1, L1_SIZE, d_layer_1_delta,
d_pred,
d_pred_delta,
d_W0,
d_W1,
d_buffer);
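    // Note: y_w is passed as 3 here although the dDot comments describe it as 1
    // and h_y/h_pred hold a single value per log; this looks like a leftover
    // from an earlier multi-output configuration.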
cudaMemcpy(h_pred, d_pred, y_size, cudaMemcpyDeviceToHost);
cudaFree(d_pred);
cudaFree(d_X);
cudaFree(d_y);
cudaFree(d_layer_1_delta);
cudaFree(d_pred_delta);
cudaFree(d_W0);
cudaFree(d_W1);
cudaFree(d_buffer);
free(h_layer_1_delta);
free(h_pred_delta);
free(h_W0);
free(h_W1);
free(h_buffer);
    //mark end
    cudaEventRecord(stop,0);
    //synchronize GPU with CPU
    cudaEventSynchronize(stop);
    //compute elapsed time in milliseconds
    cudaEventElapsedTime(&tiempo,start,stop);
    //Print results
    //release resources
cudaEventDestroy(start);
cudaEventDestroy(stop);
b=clock();
for (int i = 0; i < TRAINING_SIZE; i++){
/*if(i==9999 || i==9998 || i==9997 || i==19999 || i==19998 || i==19997 || i==29999 || i==29998 || i==29997){
printf("Prediccion[%i]: %f - valor real[%i]: %f - Error[%i]: %f\n", i, h_pred[i], i, h_y[i], i, h_pred[i] - h_y[i]);
}*/
}
free(h_pred);
printf(">Tiempo de ejecución %f ms \n",tiempo);
//printf(">Tiempo de ejecución cpu %i ms \n",b-a);
//getchar();
}//end while
}
|
b3fec61cba403b2f2ef051370327ecaeeeccf61f.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuNVSM/model.h"
#include <algorithm>
#include <glog/logging.h>
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
ModelBase<FloatT, WordIdxType, EntityIdxType>::ModelBase(
const size_t num_words,
const size_t num_entities,
const lse::ModelDesc& desc,
const lse::TrainConfig& train_config)
: desc_(desc),
streams_(new DefaultStream), // Multiple streams do not seem to improve training speed,
      // but create issues with memory allocation.
words_(WORD_REPRS,
num_words, desc_.word_repr_size(),
train_config.update_method(),
streams_.get()),
entities_(ENTITY_REPRS,
num_entities, desc_.entity_repr_size(),
train_config.update_method(),
streams_.get()),
transform_(TRANSFORM,
desc_.transform_desc(),
desc_.word_repr_size(), desc_.entity_repr_size(),
train_config.update_method(),
streams_.get()),
params_({
{WORD_REPRS, &words_},
{ENTITY_REPRS, &entities_},
{TRANSFORM, &transform_},
}) {}
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
ModelBase<FloatT, WordIdxType, EntityIdxType>::~ModelBase() {}
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
void ModelBase<FloatT, WordIdxType, EntityIdxType>::initialize(
RNG* const rng) {
words_.initialize(rng);
entities_.initialize(rng);
transform_.initialize(rng);
}
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
device_matrix<FloatT>*
ModelBase<FloatT, WordIdxType, EntityIdxType>::get_phrase_representations(
const hipStream_t stream,
const device_matrix<WordIdxType>& flattened_words,
const size_t window_size,
const device_matrix<FloatT>* const flattened_word_weights) const {
DCHECK(initialized());
DCHECK_EQ(flattened_words.size() % window_size, 0);
device_matrix<FloatT>* const phrase_reprs =
words_.get_average_representations(stream, flattened_words, window_size,
flattened_word_weights);
CHECK_MATRIX(*phrase_reprs);
return phrase_reprs;
}
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
typename Storage<FloatT>::DataType
ModelBase<FloatT, WordIdxType, EntityIdxType>::get_data() const {
DCHECK(this->initialized());
typename Storage<FloatT>::DataType data;
for (const auto& pair : params_) {
const ParamIdentifier param_id = pair.first;
Parameters<FloatT>* const param = pair.second;
Storage<FloatT>* const storage = dynamic_cast<Storage<FloatT>*>(param);
CHECK_NOTNULL(storage);
for (const auto& matrix_pair : storage->get_data()) {
std::stringstream ss;
ss << ParamName[param_id] << "-" << matrix_pair.first;
const std::string name = ss.str();
const device_matrix<FloatT>* matrix = matrix_pair.second;
CHECK(!contains_key(data, name));
data.insert(std::make_pair(name, matrix));
}
}
return data;
}
template <typename ObjectiveT>
Model<ObjectiveT>::Model(
const size_t num_words,
const size_t num_entities,
const lse::ModelDesc& desc,
const lse::TrainConfig& train_config)
: ModelBase<FloatT, WordIdxType, EntityIdxType>(
num_words, num_entities, desc, train_config),
objective_(new ObjectiveT(this, train_config)) {}
template <typename ObjectiveT>
device_matrix<typename Model<ObjectiveT>::FloatT>* Model<ObjectiveT>::infer(
const std::vector<std::vector<WordIdxType> >& words,
const size_t window_size) const {
PROFILE_FUNCTION();
DCHECK(this->initialized());
const hipStream_t stream = DefaultStream::get()->next();
std::unique_ptr<device_matrix<WordIdxType>> flattened_words(
new device_matrix<WordIdxType>(words.size() * window_size, 1, stream));
flatten(stream, words, flattened_words.get());
// Get phrase representations.
std::unique_ptr<device_matrix<FloatT> > phrase_reprs(
this->get_phrase_representations(
stream, *flattened_words, window_size));
// Project to entity space.
std::unique_ptr<device_matrix<FloatT> > word_repr_projections(
this->transform_.transform(
stream,
*phrase_reprs,
nullptr /* batch_normalization */));
hipDeviceSynchronize();
return word_repr_projections.release();
}
template <typename ObjectiveT>
typename Model<ObjectiveT>::ForwardResult*
Model<ObjectiveT>::compute_cost(
const Batch& batch,
RNG* const rng) const {
DCHECK(this->initialized());
return objective_->compute_cost(batch, rng);
}
template <typename ObjectiveT>
typename Model<ObjectiveT>::Gradients*
Model<ObjectiveT>::compute_gradients(
const ForwardResult& result) {
DCHECK(this->initialized());
return objective_->compute_gradients(result);
}
template <typename ObjectiveT>
typename Model<ObjectiveT>::FloatT
Model<ObjectiveT>::get_cost(
const Batch& batch,
const std::stringstream* const rng_state,
RNG* const rng) const {
PROFILE_FUNCTION();
DCHECK(this->initialized());
if (rng_state != nullptr) {
CHECK(!rng_state->eof() && rng_state->good());
std::stringstream rng_state_copy;
rng_state_copy << rng_state->str();
rng_state_copy >> *rng;
}
std::unique_ptr<ForwardResult> result(compute_cost(batch, rng));
return result->get_cost();
}
template <typename ObjectiveT>
void Model<ObjectiveT>::backprop(
const ForwardResult& result,
const FloatT learning_rate) {
PROFILE_FUNCTION();
DCHECK(this->initialized());
std::unique_ptr<Gradients> gradients(compute_gradients(result));
update(*gradients, learning_rate, result.scaled_regularization_lambda());
}
template <typename ObjectiveT>
void Model<ObjectiveT>::update(
const Gradients& gradients,
const FloatT learning_rate,
const FloatT scaled_regularization_lambda) {
DCHECK(this->initialized());
//
// Apply gradients.
//
// TODO(cvangysel): make async using streams.
CHECK_MATRIX_NORM(*gradients.grad_entity_repr_);
// Update entity representations.
this->entities_.update(
gradients, learning_rate, scaled_regularization_lambda,
this->streams_.get());
CHECK_MATRIX_NORM(*gradients.grad_phrase_reprs_);
// Update word representations.
this->words_.update(
gradients, learning_rate, scaled_regularization_lambda,
this->streams_.get());
CHECK_MATRIX_NORM(*gradients.grad_transform_matrix_);
CHECK_MATRIX_NORM(*gradients.grad_bias_);
this->transform_.update(
gradients, learning_rate, scaled_regularization_lambda,
this->streams_.get());
}
// Explicit instantiations.
template class ModelBase<FLOATING_POINT_TYPE, int32, int32>;
template class Model<TextEntity::Objective>;
template class Model<EntityEntity::Objective>;
template class Model<TermTerm::Objective>;
template class Model<TextEntityEntityEntity::Objective>;
template class Model<TextEntityTermTerm::Objective>;
| b3fec61cba403b2f2ef051370327ecaeeeccf61f.cu | #include "cuNVSM/model.h"
#include <algorithm>
#include <glog/logging.h>
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
ModelBase<FloatT, WordIdxType, EntityIdxType>::ModelBase(
const size_t num_words,
const size_t num_entities,
const lse::ModelDesc& desc,
const lse::TrainConfig& train_config)
: desc_(desc),
streams_(new DefaultStream), // Multiple streams do not seem to improve training speed,
      // but create issues with memory allocation.
words_(WORD_REPRS,
num_words, desc_.word_repr_size(),
train_config.update_method(),
streams_.get()),
entities_(ENTITY_REPRS,
num_entities, desc_.entity_repr_size(),
train_config.update_method(),
streams_.get()),
transform_(TRANSFORM,
desc_.transform_desc(),
desc_.word_repr_size(), desc_.entity_repr_size(),
train_config.update_method(),
streams_.get()),
params_({
{WORD_REPRS, &words_},
{ENTITY_REPRS, &entities_},
{TRANSFORM, &transform_},
}) {}
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
ModelBase<FloatT, WordIdxType, EntityIdxType>::~ModelBase() {}
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
void ModelBase<FloatT, WordIdxType, EntityIdxType>::initialize(
RNG* const rng) {
words_.initialize(rng);
entities_.initialize(rng);
transform_.initialize(rng);
}
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
device_matrix<FloatT>*
ModelBase<FloatT, WordIdxType, EntityIdxType>::get_phrase_representations(
const cudaStream_t stream,
const device_matrix<WordIdxType>& flattened_words,
const size_t window_size,
const device_matrix<FloatT>* const flattened_word_weights) const {
DCHECK(initialized());
DCHECK_EQ(flattened_words.size() % window_size, 0);
device_matrix<FloatT>* const phrase_reprs =
words_.get_average_representations(stream, flattened_words, window_size,
flattened_word_weights);
CHECK_MATRIX(*phrase_reprs);
return phrase_reprs;
}
template <typename FloatT, typename WordIdxType, typename EntityIdxType>
typename Storage<FloatT>::DataType
ModelBase<FloatT, WordIdxType, EntityIdxType>::get_data() const {
DCHECK(this->initialized());
typename Storage<FloatT>::DataType data;
for (const auto& pair : params_) {
const ParamIdentifier param_id = pair.first;
Parameters<FloatT>* const param = pair.second;
Storage<FloatT>* const storage = dynamic_cast<Storage<FloatT>*>(param);
CHECK_NOTNULL(storage);
for (const auto& matrix_pair : storage->get_data()) {
std::stringstream ss;
ss << ParamName[param_id] << "-" << matrix_pair.first;
const std::string name = ss.str();
const device_matrix<FloatT>* matrix = matrix_pair.second;
CHECK(!contains_key(data, name));
data.insert(std::make_pair(name, matrix));
}
}
return data;
}
template <typename ObjectiveT>
Model<ObjectiveT>::Model(
const size_t num_words,
const size_t num_entities,
const lse::ModelDesc& desc,
const lse::TrainConfig& train_config)
: ModelBase<FloatT, WordIdxType, EntityIdxType>(
num_words, num_entities, desc, train_config),
objective_(new ObjectiveT(this, train_config)) {}
template <typename ObjectiveT>
device_matrix<typename Model<ObjectiveT>::FloatT>* Model<ObjectiveT>::infer(
const std::vector<std::vector<WordIdxType> >& words,
const size_t window_size) const {
PROFILE_FUNCTION();
DCHECK(this->initialized());
const cudaStream_t stream = DefaultStream::get()->next();
std::unique_ptr<device_matrix<WordIdxType>> flattened_words(
new device_matrix<WordIdxType>(words.size() * window_size, 1, stream));
flatten(stream, words, flattened_words.get());
// Get phrase representations.
std::unique_ptr<device_matrix<FloatT> > phrase_reprs(
this->get_phrase_representations(
stream, *flattened_words, window_size));
// Project to entity space.
std::unique_ptr<device_matrix<FloatT> > word_repr_projections(
this->transform_.transform(
stream,
*phrase_reprs,
nullptr /* batch_normalization */));
cudaDeviceSynchronize();
return word_repr_projections.release();
}
template <typename ObjectiveT>
typename Model<ObjectiveT>::ForwardResult*
Model<ObjectiveT>::compute_cost(
const Batch& batch,
RNG* const rng) const {
DCHECK(this->initialized());
return objective_->compute_cost(batch, rng);
}
template <typename ObjectiveT>
typename Model<ObjectiveT>::Gradients*
Model<ObjectiveT>::compute_gradients(
const ForwardResult& result) {
DCHECK(this->initialized());
return objective_->compute_gradients(result);
}
template <typename ObjectiveT>
typename Model<ObjectiveT>::FloatT
Model<ObjectiveT>::get_cost(
const Batch& batch,
const std::stringstream* const rng_state,
RNG* const rng) const {
PROFILE_FUNCTION();
DCHECK(this->initialized());
if (rng_state != nullptr) {
CHECK(!rng_state->eof() && rng_state->good());
std::stringstream rng_state_copy;
rng_state_copy << rng_state->str();
rng_state_copy >> *rng;
}
std::unique_ptr<ForwardResult> result(compute_cost(batch, rng));
return result->get_cost();
}
template <typename ObjectiveT>
void Model<ObjectiveT>::backprop(
const ForwardResult& result,
const FloatT learning_rate) {
PROFILE_FUNCTION();
DCHECK(this->initialized());
std::unique_ptr<Gradients> gradients(compute_gradients(result));
update(*gradients, learning_rate, result.scaled_regularization_lambda());
}
template <typename ObjectiveT>
void Model<ObjectiveT>::update(
const Gradients& gradients,
const FloatT learning_rate,
const FloatT scaled_regularization_lambda) {
DCHECK(this->initialized());
//
// Apply gradients.
//
// TODO(cvangysel): make async using streams.
CHECK_MATRIX_NORM(*gradients.grad_entity_repr_);
// Update entity representations.
this->entities_.update(
gradients, learning_rate, scaled_regularization_lambda,
this->streams_.get());
CHECK_MATRIX_NORM(*gradients.grad_phrase_reprs_);
// Update word representations.
this->words_.update(
gradients, learning_rate, scaled_regularization_lambda,
this->streams_.get());
CHECK_MATRIX_NORM(*gradients.grad_transform_matrix_);
CHECK_MATRIX_NORM(*gradients.grad_bias_);
this->transform_.update(
gradients, learning_rate, scaled_regularization_lambda,
this->streams_.get());
}
// Explicit instantiations.
template class ModelBase<FLOATING_POINT_TYPE, int32, int32>;
template class Model<TextEntity::Objective>;
template class Model<EntityEntity::Objective>;
template class Model<TermTerm::Objective>;
template class Model<TextEntityEntityEntity::Objective>;
template class Model<TextEntityTermTerm::Objective>;
|
036d26c3cbea6c0e73ca8ddaccb83899cbcbd73f.hip | // !!! This is a file automatically generated by hipify!!!
extern "C" {
#include "blas.h"
#include "hip/hip_runtime.h"
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
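// The (blockIdx.x + blockIdx.y*gridDim.x) flattening above (used by every kernel in
// this file) lets the launch helper spill blocks into the grid's y dimension when N
// is too large for the x dimension alone, while each thread still derives a unique
// linear index i; cuda_gridsize is assumed to come from blas.h.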
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void mask_kernel(int n, float *x, float *mask)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == 0) x[i] = 0;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
extern "C" void axpy_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_ongpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void axpy_ongpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( axpy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
hipLaunchKernelGGL(( copy_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(hipPeekAtLastError());
}
extern "C" void mask_ongpu(int N, float * X, float * mask)
{
hipLaunchKernelGGL(( mask_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, X, mask);
check_error(hipPeekAtLastError());
}
extern "C" void scal_ongpu(int N, float ALPHA, float * X, int INCX)
{
hipLaunchKernelGGL(( scal_kernel), dim3(cuda_gridsize(N)), dim3(BLOCK), 0, 0, N, ALPHA, X, INCX);
check_error(hipPeekAtLastError());
}
| 036d26c3cbea6c0e73ca8ddaccb83899cbcbd73f.cu | extern "C" {
#include "blas.h"
#include "cuda.h"
}
__global__ void axpy_kernel(int N, float ALPHA, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[OFFY+i*INCY] += ALPHA*X[OFFX+i*INCX];
}
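// The (blockIdx.x + blockIdx.y*gridDim.x) flattening above (used by every kernel in
// this file) lets the launch helper spill blocks into the grid's y dimension when N
// is too large for the x dimension alone, while each thread still derives a unique
// linear index i; cuda_gridsize is assumed to come from blas.h.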
__global__ void scal_kernel(int N, float ALPHA, float *X, int INCX)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) X[i*INCX] *= ALPHA;
}
__global__ void mask_kernel(int n, float *x, float *mask)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n && mask[i] == 0) x[i] = 0;
}
__global__ void copy_kernel(int N, float *X, int OFFX, int INCX, float *Y, int OFFY, int INCY)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < N) Y[i*INCY + OFFY] = X[i*INCX + OFFX];
}
extern "C" void axpy_ongpu(int N, float ALPHA, float * X, int INCX, float * Y, int INCY)
{
axpy_ongpu_offset(N, ALPHA, X, 0, INCX, Y, 0, INCY);
}
extern "C" void axpy_ongpu_offset(int N, float ALPHA, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
axpy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void copy_ongpu(int N, float * X, int INCX, float * Y, int INCY)
{
copy_ongpu_offset(N, X, 0, INCX, Y, 0, INCY);
}
extern "C" void copy_ongpu_offset(int N, float * X, int OFFX, int INCX, float * Y, int OFFY, int INCY)
{
copy_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, OFFX, INCX, Y, OFFY, INCY);
check_error(cudaPeekAtLastError());
}
extern "C" void mask_ongpu(int N, float * X, float * mask)
{
mask_kernel<<<cuda_gridsize(N), BLOCK>>>(N, X, mask);
check_error(cudaPeekAtLastError());
}
extern "C" void scal_ongpu(int N, float ALPHA, float * X, int INCX)
{
scal_kernel<<<cuda_gridsize(N), BLOCK>>>(N, ALPHA, X, INCX);
check_error(cudaPeekAtLastError());
}
|
dfea8411d17adb401644d3cd159142c291c56906.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kRectifyBoundingBox( float* boxes, float* width_offset, float* height_offset, float* flip, int num_images, int patch_width, int patch_height, int num_locs) {
for (int loc_id = blockIdx.x; loc_id < num_locs; loc_id += gridDim.x) {
float *xmin_block = boxes + num_images * loc_id,
*ymin_block = boxes + num_images * (loc_id + num_locs),
*xmax_block = boxes + num_images * (loc_id + num_locs * 2),
*ymax_block = boxes + num_images * (loc_id + num_locs * 3);
for (int image_id = threadIdx.x; image_id < num_images; image_id += blockDim.x) {
float xmin = (flip[image_id] > 0.5) ? (256.0/patch_width - xmax_block[image_id]) : xmin_block[image_id],
xmax = (flip[image_id] > 0.5) ? (256.0/patch_width - xmin_block[image_id]) : xmax_block[image_id],
ymin = ymin_block[image_id],
ymax = ymax_block[image_id],
wo = width_offset[image_id],
ho = height_offset[image_id];
xmin_block[image_id] = xmin - wo / patch_width;
xmax_block[image_id] = xmax - wo / patch_width;
ymin_block[image_id] = ymin - ho / patch_height;
ymax_block[image_id] = ymax - ho / patch_height;
}
}
} | dfea8411d17adb401644d3cd159142c291c56906.cu | #include "includes.h"
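// Context, inferred from the arithmetic below rather than stated anywhere: boxes
// holds [xmin | ymin | xmax | ymax] planes of num_images entries per location, in
// patch-normalized coordinates. A flip mirrors x around 256/patch_width (which
// suggests a 256-pixel source image), and the width/height offsets then translate
// each box into the cropped patch's frame.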
__global__ void kRectifyBoundingBox( float* boxes, float* width_offset, float* height_offset, float* flip, int num_images, int patch_width, int patch_height, int num_locs) {
for (int loc_id = blockIdx.x; loc_id < num_locs; loc_id += gridDim.x) {
float *xmin_block = boxes + num_images * loc_id,
*ymin_block = boxes + num_images * (loc_id + num_locs),
*xmax_block = boxes + num_images * (loc_id + num_locs * 2),
*ymax_block = boxes + num_images * (loc_id + num_locs * 3);
for (int image_id = threadIdx.x; image_id < num_images; image_id += blockDim.x) {
float xmin = (flip[image_id] > 0.5) ? (256.0/patch_width - xmax_block[image_id]) : xmin_block[image_id],
xmax = (flip[image_id] > 0.5) ? (256.0/patch_width - xmin_block[image_id]) : xmax_block[image_id],
ymin = ymin_block[image_id],
ymax = ymax_block[image_id],
wo = width_offset[image_id],
ho = height_offset[image_id];
xmin_block[image_id] = xmin - wo / patch_width;
xmax_block[image_id] = xmax - wo / patch_width;
ymin_block[image_id] = ymin - ho / patch_height;
ymax_block[image_id] = ymax - ho / patch_height;
}
}
} |
c831d44867e79984b932be390a0d46b277faa660.hip | // !!! This is a file automatically generated by hipify!!!
#include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <hip/hip_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/kernels/math.h"
#include "chainerx/numeric.h"
#include "chainerx/routines/math.h"
#include "chainerx/routines/type_util.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename In, typename Out>
struct IfLessElseASSAImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ void operator()(int64_t /*i*/, InCudaType x1, OutCudaType neg, OutCudaType& out) { out = x1 < x2 ? pos : neg; }
InCudaType x2;
OutCudaType pos;
};
class CudaIfLessElseASSAKernel : public IfLessElseASSAKernel {
public:
void Call(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, neg, out);
Dtype x_dtype = ResultType(x1, x2);
const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype);
const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(x_dtype, [&](auto x_pt) {
using In = typename decltype(x_pt)::type;
using InCudaType = cuda_internal::DataType<In>;
VisitNumericDtype(out.dtype(), [&](auto pt) {
using Out = typename decltype(pt)::type;
using OutCudaType = cuda_internal::DataType<Out>;
Elementwise<const In, const Out, Out>(
IfLessElseASSAImpl<In, Out>{static_cast<InCudaType>(x2), static_cast<OutCudaType>(pos)}, x1_cast, neg_cast, out);
});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(IfLessElseASSAKernel, CudaIfLessElseASSAKernel);
template <typename In, typename Out>
struct IfGreaterElseASSAImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ void operator()(int64_t /*i*/, InCudaType x1, OutCudaType neg, OutCudaType& out) { out = x1 > x2 ? pos : neg; }
InCudaType x2;
OutCudaType pos;
};
class CudaIfGreaterElseASSAKernel : public IfGreaterElseASSAKernel {
public:
void Call(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, neg, out);
Dtype x_dtype = ResultType(x1, x2);
const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype);
const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(x_dtype, [&](auto x_pt) {
using In = typename decltype(x_pt)::type;
using InCudaType = cuda_internal::DataType<In>;
VisitNumericDtype(out.dtype(), [&](auto pt) {
using Out = typename decltype(pt)::type;
using OutCudaType = cuda_internal::DataType<Out>;
Elementwise<const In, const Out, Out>(
IfGreaterElseASSAImpl<In, Out>{static_cast<InCudaType>(x2), static_cast<OutCudaType>(pos)}, x1_cast, neg_cast, out);
});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(IfGreaterElseASSAKernel, CudaIfGreaterElseASSAKernel);
template <typename In, typename Out>
struct IfGreaterElseAAAAImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ void operator()(int64_t /*i*/, InCudaType x1, InCudaType x2, OutCudaType pos, OutCudaType neg, OutCudaType& out) {
out = x1 > x2 ? pos : neg;
}
};
class CudaIfGreaterElseAAAAKernel : public IfGreaterElseAAAAKernel {
public:
void Call(const Array& x1, const Array& x2, const Array& pos, const Array& neg, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, pos, neg, out);
Dtype x_dtype = ResultType(x1, x2);
const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype);
const Array& x2_cast = x2.dtype() == x_dtype ? x2 : x2.AsType(x_dtype);
const Array& pos_cast = pos.dtype() == out.dtype() ? pos : pos.AsType(out.dtype());
const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(x_dtype, [&](auto x_pt) {
using In = typename decltype(x_pt)::type;
VisitNumericDtype(out.dtype(), [&](auto pt) {
using Out = typename decltype(pt)::type;
Elementwise<const In, const In, const Out, const Out, Out>(
IfGreaterElseAAAAImpl<In, Out>{}, x1_cast, x2_cast, pos_cast, neg_cast, out);
});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(IfGreaterElseAAAAKernel, CudaIfGreaterElseAAAAKernel);
template <typename T>
struct TanhImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tanh(x); }
};
} // namespace
} // namespace cuda
} // namespace chainerx
| c831d44867e79984b932be390a0d46b277faa660.cu | #include "chainerx/cuda/cuda_device.h"
#include <cmath>
#include <cstdint>
#include <cuda_runtime.h>
#include "chainerx/array.h"
#include "chainerx/cuda/cuda_runtime.h"
#include "chainerx/cuda/cuda_set_device_scope.h"
#include "chainerx/cuda/elementwise.cuh"
#include "chainerx/cuda/kernel_regist.h"
#include "chainerx/cuda/numeric.cuh"
#include "chainerx/device.h"
#include "chainerx/dtype.h"
#include "chainerx/kernels/math.h"
#include "chainerx/numeric.h"
#include "chainerx/routines/math.h"
#include "chainerx/routines/type_util.h"
#include "chainerx/scalar.h"
namespace chainerx {
namespace cuda {
namespace {
template <typename In, typename Out>
struct IfLessElseASSAImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ void operator()(int64_t /*i*/, InCudaType x1, OutCudaType neg, OutCudaType& out) { out = x1 < x2 ? pos : neg; }
InCudaType x2;
OutCudaType pos;
};
class CudaIfLessElseASSAKernel : public IfLessElseASSAKernel {
public:
void Call(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, neg, out);
Dtype x_dtype = ResultType(x1, x2);
const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype);
const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(x_dtype, [&](auto x_pt) {
using In = typename decltype(x_pt)::type;
using InCudaType = cuda_internal::DataType<In>;
VisitNumericDtype(out.dtype(), [&](auto pt) {
using Out = typename decltype(pt)::type;
using OutCudaType = cuda_internal::DataType<Out>;
Elementwise<const In, const Out, Out>(
IfLessElseASSAImpl<In, Out>{static_cast<InCudaType>(x2), static_cast<OutCudaType>(pos)}, x1_cast, neg_cast, out);
});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(IfLessElseASSAKernel, CudaIfLessElseASSAKernel);
template <typename In, typename Out>
struct IfGreaterElseASSAImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ void operator()(int64_t /*i*/, InCudaType x1, OutCudaType neg, OutCudaType& out) { out = x1 > x2 ? pos : neg; }
InCudaType x2;
OutCudaType pos;
};
class CudaIfGreaterElseASSAKernel : public IfGreaterElseASSAKernel {
public:
void Call(const Array& x1, Scalar x2, Scalar pos, const Array& neg, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, neg, out);
Dtype x_dtype = ResultType(x1, x2);
const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype);
const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(x_dtype, [&](auto x_pt) {
using In = typename decltype(x_pt)::type;
using InCudaType = cuda_internal::DataType<In>;
VisitNumericDtype(out.dtype(), [&](auto pt) {
using Out = typename decltype(pt)::type;
using OutCudaType = cuda_internal::DataType<Out>;
Elementwise<const In, const Out, Out>(
IfGreaterElseASSAImpl<In, Out>{static_cast<InCudaType>(x2), static_cast<OutCudaType>(pos)}, x1_cast, neg_cast, out);
});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(IfGreaterElseASSAKernel, CudaIfGreaterElseASSAKernel);
template <typename In, typename Out>
struct IfGreaterElseAAAAImpl {
using InCudaType = cuda_internal::DataType<In>;
using OutCudaType = cuda_internal::DataType<Out>;
__device__ void operator()(int64_t /*i*/, InCudaType x1, InCudaType x2, OutCudaType pos, OutCudaType neg, OutCudaType& out) {
out = x1 > x2 ? pos : neg;
}
};
class CudaIfGreaterElseAAAAKernel : public IfGreaterElseAAAAKernel {
public:
void Call(const Array& x1, const Array& x2, const Array& pos, const Array& neg, const Array& out) override {
Device& device = x1.device();
device.CheckDevicesCompatible(x1, x2, pos, neg, out);
Dtype x_dtype = ResultType(x1, x2);
const Array& x1_cast = x1.dtype() == x_dtype ? x1 : x1.AsType(x_dtype);
const Array& x2_cast = x2.dtype() == x_dtype ? x2 : x2.AsType(x_dtype);
const Array& pos_cast = pos.dtype() == out.dtype() ? pos : pos.AsType(out.dtype());
const Array& neg_cast = neg.dtype() == out.dtype() ? neg : neg.AsType(out.dtype());
CudaSetDeviceScope scope{device.index()};
VisitNumericDtype(x_dtype, [&](auto x_pt) {
using In = typename decltype(x_pt)::type;
VisitNumericDtype(out.dtype(), [&](auto pt) {
using Out = typename decltype(pt)::type;
Elementwise<const In, const In, const Out, const Out, Out>(
IfGreaterElseAAAAImpl<In, Out>{}, x1_cast, x2_cast, pos_cast, neg_cast, out);
});
});
}
};
CHAINERX_CUDA_REGISTER_KERNEL(IfGreaterElseAAAAKernel, CudaIfGreaterElseAAAAKernel);
template <typename T>
struct TanhImpl {
using CudaType = cuda_internal::DataType<T>;
__device__ void operator()(int64_t /*i*/, CudaType x, CudaType& out) { out = cuda::Tanh(x); }
};
} // namespace
} // namespace cuda
} // namespace chainerx
|
bc8cd30e3425f994b5e8457a226bd3ef257eccc2.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <time.h>
#include <hip/hip_runtime.h>
/*
 * File reading
*/
void Read(float** R, float** G, float** B,
int *N, int *S, int **ordenamiento, int* P, const char *filename) {
FILE *fp;
fp = fopen(filename, "r");
fscanf(fp, "%d %d\n", N, S);
int imsize = (*N) * (*N);
*P = (*N)/ (*S);
int P2 = (*P) * (*P) ;
float* R1 = new float[imsize];
float* G1 = new float[imsize];
float* B1 = new float[imsize];
int *orden_temp = new int[P2];
for(int i = 0; i < P2; i++)
fscanf(fp, "%d ", &(orden_temp[i]));
for(int i = 0; i < imsize; i++)
fscanf(fp, "%f ", &(R1[i]));
for(int i = 0; i < imsize; i++)
fscanf(fp, "%f ", &(G1[i]));
for(int i = 0; i < imsize; i++)
fscanf(fp, "%f ", &(B1[i]));
fclose(fp);
*R = R1; *G = G1; *B = B1;
*ordenamiento = orden_temp;
}
/*
 * File writing
*/
void Write(float* R, float* G, float* B,
int M, int N, const char *filename) {
FILE *fp;
fp = fopen(filename, "w");
fprintf(fp, "%d %d\n", M, N);
for(int i = 0; i < M*N-1; i++)
fprintf(fp, "%f ", R[i]);
fprintf(fp, "%f\n", R[M*N-1]);
for(int i = 0; i < M*N-1; i++)
fprintf(fp, "%f ", G[i]);
fprintf(fp, "%f\n", G[M*N-1]);
for(int i = 0; i < M*N-1; i++)
fprintf(fp, "%f ", B[i]);
fprintf(fp, "%f\n", B[M*N-1]);
fclose(fp);
}
/*
 * CPU image processing
*/
void funcionCPU(float* R,float* G,float* B, float* Rout,float* Gout,float* Bout, int N, int S, int P, int* ordenamiento){
for (int i = 0; i< N*N; i++){
int x = i % N;
int y = i / N;
int x_bloque_escritura = x / S;
int y_bloque_escritura = y /S;
int indice_bloque_escritura = x_bloque_escritura + y_bloque_escritura * P;
int indice_bloque_lectura = ordenamiento[indice_bloque_escritura];
int x_bloque_lectura = indice_bloque_lectura % P;
int y_bloque_lectura = indice_bloque_lectura / P;
int shift_horizontal = x_bloque_lectura - x_bloque_escritura;
int shift_vertical = y_bloque_lectura - y_bloque_escritura;
x = x + shift_horizontal * S;
y = y + shift_vertical * S;
Rout[i] = R[x + y*N];
Gout[i] = G[x + y*N];
Bout[i] = B[x + y*N];
}
}
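/*
 * A small worked example of the block shuffle above (values assumed purely for
 * illustration): with N = 4, S = 2, P = 2, pixel i = 3 is (x, y) = (3, 0), which
 * lies in write block (1, 0), i.e. block index 1. If ordenamiento[1] = 2, the read
 * block is (0, 1), giving shifts of (-1, +1) in block units, so the pixel is read
 * from (3 - 2, 0 + 2) = (1, 2).
 */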
/*
 * GPU image processing
*/
__global__ void kernelGPU(float* R,float* G,float* B, float* Rout,float* Gout,float* Bout, int N, int S, int P, int* ordenamiento){
int i = threadIdx.x + blockDim.x* blockIdx.x;
if (i < N*N){
int x = i % N;
int y = i / N;
int x_bloque_escritura = x / S;
int y_bloque_escritura = y /S;
int indice_bloque_escritura = x_bloque_escritura + y_bloque_escritura * P;
int indice_bloque_lectura = ordenamiento[indice_bloque_escritura];
int x_bloque_lectura = indice_bloque_lectura % P;
int y_bloque_lectura = indice_bloque_lectura / P;
int shift_horizontal = x_bloque_lectura - x_bloque_escritura;
int shift_vertical = y_bloque_lectura - y_bloque_escritura;
x = x + shift_horizontal * S;
y = y + shift_vertical * S;
Rout[i] = R[x + y*N];
Gout[i] = G[x + y*N];
Bout[i] = B[x + y*N];
}
}
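/*
* Each thread performs one independent gather (one output pixel reads one
* input pixel), so no synchronization or atomics are needed. A minimal
* launch-error check that could be added after the kernel call (a sketch,
* not part of the original program):
*
*   hipError_t err = hipGetLastError();
*   if (err != hipSuccess) printf("kernel error: %s\n", hipGetErrorString(err));
*/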
/*
* Main program
*/
int main(int argc, char **argv){
/*
* Initialization
*/
clock_t t1, t2;
hipEvent_t ct1, ct2;
double ms;
float dt;
int N, S;
int P;
int *ordenamiento; // array with the block ordering (permutation)
int* ordenamiento_dev;
float *Rhost, *Ghost, *Bhost;
float *Rhostout, *Ghostout, *Bhostout;
float *Rdev, *Gdev, *Bdev;
float *Rdevout, *Gdevout, *Bdevout;
char names[1][3][20] = {{"img100x100.txt\0", "img100x100CPU.txt\0", "img100x100GPU.txt\0"}};
for (int i=0; i<1; i++){
Read(&Rhost, &Ghost, &Bhost, &N, &S, &ordenamiento, &P, names[i][0]);
/*
* CPU part
*/
Rhostout = new float[N*N];
Ghostout = new float[N*N];
Bhostout = new float[N*N];
t1 = clock();
funcionCPU(Rhost,Ghost,Bhost, Rhostout,Ghostout,Bhostout, N, S, P, ordenamiento); // Add parameters! (leftover template note)
t2 = clock();
ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
std::cout << "Tiempo CPU: " << ms << "[ms]" << std::endl;
Write(Rhostout, Ghostout, Bhostout, N, N, names[i][1]);
delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
/*
* GPU part
*/
int grid_size, block_size = 256;
grid_size = (int)ceil((float) N * N / block_size);
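// e.g. for a 100x100 image: N*N = 10000 threads, ceil(10000/256) = 40 blocks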
hipMalloc((void**)&Rdev, N * N * sizeof(float));
hipMalloc((void**)&Gdev, N * N * sizeof(float));
hipMalloc((void**)&Bdev, N * N * sizeof(float));
hipMalloc((void**)&ordenamiento_dev, (P*P) * sizeof(int));
hipMemcpy(Rdev, Rhost, N * N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(Gdev, Ghost, N * N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(Bdev, Bhost, N * N * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(ordenamiento_dev, ordenamiento, (P*P) * sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void**)&Rdevout, N * N * sizeof(float));
hipMalloc((void**)&Gdevout, N * N * sizeof(float));
hipMalloc((void**)&Bdevout, N * N * sizeof(float));
hipEventCreate(&ct1);
hipEventCreate(&ct2);
hipEventRecord(ct1);
hipLaunchKernelGGL(( kernelGPU), dim3(grid_size), dim3(block_size), 0, 0, Rdev,Gdev,Bdev, Rdevout,Gdevout,Bdevout, N, S, P, ordenamiento_dev); // Add parameters! (leftover template note)
hipEventRecord(ct2);
hipEventSynchronize(ct2);
hipEventElapsedTime(&dt, ct1, ct2);
std::cout << "Tiempo GPU: " << dt << "[ms]" << std::endl;
Rhostout = new float[N*N];
Ghostout = new float[N*N];
Bhostout = new float[N*N];
hipMemcpy(Rhostout, Rdevout, N * N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(Ghostout, Gdevout, N * N * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(Bhostout, Bdevout, N * N * sizeof(float), hipMemcpyDeviceToHost);
Write(Rhostout, Ghostout, Bhostout, N, N, names[i][2]);
hipFree(Rdev); hipFree(Gdev); hipFree(Bdev);
hipFree(Rdevout); hipFree(Gdevout); hipFree(Bdevout);
hipFree(ordenamiento_dev);
hipEventDestroy(ct1); hipEventDestroy(ct2);
delete[] Rhost; delete[] Ghost; delete[] Bhost;
delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
delete[] ordenamiento;
}
return 0;
} | bc8cd30e3425f994b5e8457a226bd3ef257eccc2.cu | #include <iostream>
#include <cstdio>   // FILE, fopen, fscanf, fprintf
#include <cstdlib>  // exit
#include <cmath>    // ceil
#include <time.h>
#include <cuda_runtime.h>
/*
* File reading
*/
void Read(float** R, float** G, float** B,
int *N, int *S, int **ordenamiento, int* P, const char *filename) {
FILE *fp;
fp = fopen(filename, "r");
fscanf(fp, "%d %d\n", N, S);
int imsize = (*N) * (*N);
*P = (*N)/ (*S);
int P2 = (*P) * (*P) ;
float* R1 = new float[imsize];
float* G1 = new float[imsize];
float* B1 = new float[imsize];
int *orden_temp = new int[P2];
for(int i = 0; i < P2; i++)
fscanf(fp, "%d ", &(orden_temp[i]));
for(int i = 0; i < imsize; i++)
fscanf(fp, "%f ", &(R1[i]));
for(int i = 0; i < imsize; i++)
fscanf(fp, "%f ", &(G1[i]));
for(int i = 0; i < imsize; i++)
fscanf(fp, "%f ", &(B1[i]));
fclose(fp);
*R = R1; *G = G1; *B = B1;
*ordenamiento = orden_temp;
}
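/*
* Expected input layout (derived from the fscanf calls above):
*   line 1: N S            (image side length and block side length)
*   next:   P*P integers   (the block permutation, P = N/S)
*   then:   N*N floats for R, then N*N for G, then N*N for B
*/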
/*
* File writing
*/
void Write(float* R, float* G, float* B,
int M, int N, const char *filename) {
FILE *fp;
fp = fopen(filename, "w");
fprintf(fp, "%d %d\n", M, N);
for(int i = 0; i < M*N-1; i++)
fprintf(fp, "%f ", R[i]);
fprintf(fp, "%f\n", R[M*N-1]);
for(int i = 0; i < M*N-1; i++)
fprintf(fp, "%f ", G[i]);
fprintf(fp, "%f\n", G[M*N-1]);
for(int i = 0; i < M*N-1; i++)
fprintf(fp, "%f ", B[i]);
fprintf(fp, "%f\n", B[M*N-1]);
fclose(fp);
}
/*
* CPU image processing
*/
void funcionCPU(float* R,float* G,float* B, float* Rout,float* Gout,float* Bout, int N, int S, int P, int* ordenamiento){
for (int i = 0; i< N*N; i++){
int x = i % N;
int y = i / N;
int x_bloque_escritura = x / S;
int y_bloque_escritura = y / S;
int indice_bloque_escritura = x_bloque_escritura + y_bloque_escritura * P;
int indice_bloque_lectura = ordenamiento[indice_bloque_escritura];
int x_bloque_lectura = indice_bloque_lectura % P;
int y_bloque_lectura = indice_bloque_lectura / P;
int shift_horizontal = x_bloque_lectura - x_bloque_escritura;
int shift_vertical = y_bloque_lectura - y_bloque_escritura;
x = x + shift_horizontal * S;
y = y + shift_vertical * S;
Rout[i] = R[x + y*N];
Gout[i] = G[x + y*N];
Bout[i] = B[x + y*N];
}
}
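/*
* Worked example (illustrative values, not taken from any input file):
* with N = 4, S = 2, P = 2 and ordenamiento = {3, 2, 1, 0}, pixel i = 5
* (x = 1, y = 1) lies in write-block 0; ordenamiento[0] = 3 names read-block
* (1, 1), so the shifts are (+1, +1) blocks and the pixel is read from
* (3, 3), i.e. Rout[5] = R[15]: block 0 of the output receives block 3 of
* the input.
*/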
/*
* GPU image processing
*/
__global__ void kernelGPU(float* R,float* G,float* B, float* Rout,float* Gout,float* Bout, int N, int S, int P, int* ordenamiento){
int i = threadIdx.x + blockDim.x* blockIdx.x;
if (i < N*N){
int x = i % N;
int y = i / N;
int x_bloque_escritura = x / S;
int y_bloque_escritura = y / S;
int indice_bloque_escritura = x_bloque_escritura + y_bloque_escritura * P;
int indice_bloque_lectura = ordenamiento[indice_bloque_escritura];
int x_bloque_lectura = indice_bloque_lectura % P;
int y_bloque_lectura = indice_bloque_lectura / P;
int shift_horizontal = x_bloque_lectura - x_bloque_escritura;
int shift_vertical = y_bloque_lectura - y_bloque_escritura;
x = x + shift_horizontal * S;
y = y + shift_vertical * S;
Rout[i] = R[x + y*N];
Gout[i] = G[x + y*N];
Bout[i] = B[x + y*N];
}
}
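/*
* Each thread performs one independent gather (one output pixel reads one
* input pixel), so no synchronization or atomics are needed. A minimal
* launch-error check that could be added after the kernel call (a sketch,
* not part of the original program):
*
*   cudaError_t err = cudaGetLastError();
*   if (err != cudaSuccess) printf("kernel error: %s\n", cudaGetErrorString(err));
*/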
/*
* Main program
*/
int main(int argc, char **argv){
/*
* Initialization
*/
clock_t t1, t2;
cudaEvent_t ct1, ct2;
double ms;
float dt;
int N, S;
int P;
int *ordenamiento; // array with the block ordering (permutation)
int* ordenamiento_dev;
float *Rhost, *Ghost, *Bhost;
float *Rhostout, *Ghostout, *Bhostout;
float *Rdev, *Gdev, *Bdev;
float *Rdevout, *Gdevout, *Bdevout;
char names[1][3][20] = {{"img100x100.txt\0", "img100x100CPU.txt\0", "img100x100GPU.txt\0"}};
for (int i=0; i<1; i++){
Read(&Rhost, &Ghost, &Bhost, &N, &S, &ordenamiento, &P, names[i][0]);
/*
* CPU part
*/
Rhostout = new float[N*N];
Ghostout = new float[N*N];
Bhostout = new float[N*N];
t1 = clock();
funcionCPU(Rhost,Ghost,Bhost, Rhostout,Ghostout,Bhostout, N, S, P, ordenamiento); // Add parameters! (leftover template note)
t2 = clock();
ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
std::cout << "Tiempo CPU: " << ms << "[ms]" << std::endl;
Write(Rhostout, Ghostout, Bhostout, N, N, names[i][1]);
delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
/*
* GPU part
*/
int grid_size, block_size = 256;
grid_size = (int)ceil((float) N * N / block_size);
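// e.g. for a 100x100 image: N*N = 10000 threads, ceil(10000/256) = 40 blocks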
cudaMalloc((void**)&Rdev, N * N * sizeof(float));
cudaMalloc((void**)&Gdev, N * N * sizeof(float));
cudaMalloc((void**)&Bdev, N * N * sizeof(float));
cudaMalloc((void**)&ordenamiento_dev, (P*P) * sizeof(int));
cudaMemcpy(Rdev, Rhost, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(Gdev, Ghost, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(Bdev, Bhost, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(ordenamiento_dev, ordenamiento, (P*P) * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&Rdevout, N * N * sizeof(float));
cudaMalloc((void**)&Gdevout, N * N * sizeof(float));
cudaMalloc((void**)&Bdevout, N * N * sizeof(float));
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
cudaEventRecord(ct1);
kernelGPU<<<grid_size, block_size>>>(Rdev,Gdev,Bdev, Rdevout,Gdevout,Bdevout, N, S, P, ordenamiento_dev); // Add parameters! (leftover template note)
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
std::cout << "Tiempo GPU: " << dt << "[ms]" << std::endl;
Rhostout = new float[N*N];
Ghostout = new float[N*N];
Bhostout = new float[N*N];
cudaMemcpy(Rhostout, Rdevout, N * N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(Ghostout, Gdevout, N * N * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(Bhostout, Bdevout, N * N * sizeof(float), cudaMemcpyDeviceToHost);
Write(Rhostout, Ghostout, Bhostout, N, N, names[i][2]);
cudaFree(Rdev); cudaFree(Gdev); cudaFree(Bdev);
cudaFree(Rdevout); cudaFree(Gdevout); cudaFree(Bdevout);
cudaFree(ordenamiento_dev);
cudaEventDestroy(ct1); cudaEventDestroy(ct2);
delete[] Rhost; delete[] Ghost; delete[] Bhost;
delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
delete[] ordenamiento;
}
return 0;
} |