hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
d5d769dbd9f046fecfa4d563075b97bb6ca1d619.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#define EXPO 7
//the right way to add the CUDA driver if you have a GPU:
//http://askubuntu.com/questions/451221/ubuntu-14-04-install-nvidia-driver
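/* None of the HIP API calls below check their return status. A small helper
   macro such as the following (an illustrative addition, not part of the
   original hipify output) makes allocation/copy failures visible, e.g.
   HIP_CHECK(hipMalloc(...)): */
#define HIP_CHECK(call)                                                  \
    do {                                                                 \
        hipError_t err_ = (call);                                        \
        if (err_ != hipSuccess) {                                        \
            fprintf(stderr, "HIP error %s at %s:%d\n",                   \
                    hipGetErrorString(err_), __FILE__, __LINE__);        \
        }                                                                \
    } while (0)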
__global__ void BackwardKernel(int k,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD,float* deviceFinalX,float initialValue)
{
int bx1=blockIdx.x;
int by1=blockIdx.y;
int tx1=threadIdx.x;
int ty1=threadIdx.y;
//printf("inside of kernle %f \n",deviceFinalX[4]);
int backhelper1=ty1*blockColumn+tx1+1;
int backhelper2=2*backhelper1-1;//(int((2*backhelper1-1)*pow(2.0,1.0*(k-1))))/(int)(pow(2.0,(k-1)*1.0));
int backhelper3=(int)pow(2.0,(EXPO+1)*1.0);
int backhelper4=(int)pow(2.0,(EXPO-k+2)*1.0);
int h=(int)(pow(2.0,1.0*(k-1)));
float backhelperd=deviceD[-k+backhelper3-backhelper4+backhelper2];
float backhelpera=deviceA[-k+backhelper3-backhelper4+backhelper2];
float backhelperb=deviceB[-k+backhelper3-backhelper4+backhelper2];
float backhelperc=deviceC[-k+backhelper3-backhelper4+backhelper2];
int xindex1=backhelper2*pow(2.0,1.0*(k-1))-h;
int xindex2=backhelper2*pow(2.0,1.0*(k-1))+h;
//so thread i will be in charge of (2i-1)*2^(k-1) calculation
//printf("%d ",int((2*backhelper1-1)*pow(2.0,1.0*(k-1))));
deviceFinalX[(int)(backhelper2*pow(2.0,1.0*(k-1)))]=(backhelperd-backhelpera*deviceFinalX[xindex1]-backhelperc*deviceFinalX[xindex2])*1.0/backhelperb;
__syncthreads();
}
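/* Backward-substitution sketch for the kernel above: at level k each thread
   recovers one unknown from its two already-known neighbours a half-stride
   h = 2^(k-1) away,
       x[i] = (d_i - a_i*x[i-h] - c_i*x[i+h]) / b_i,   i = (2t-1)*2^(k-1),
   where (a_i,b_i,c_i,d_i) are the reduced coefficients stored for level k.
   For EXPO=3 and k=2 (h=2), thread 1 computes x[2] from the boundary x[0]
   and the midpoint x[4] that the host solved directly. */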
//this is the kernel to calculate the P=(a,b,c,d)
//need to pass in the step which is j, and then figure out which thread to work on
//the calculation in (2^j,2*2^j,3*2^j....)
__global__ void CalculatePArrayKernel(int step,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD)
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int helper11=pow(2.0,(EXPO+1)*1.0);
int helper22=pow(2.0,(EXPO-step+1)*1.0);
int helper44=pow(2.0,(EXPO-step+2)*1.0);
int helper33=pow(2.0,EXPO*1.0)-1;
//printf("step is running: %d \n",step);
// if(helper3<pow(2.0,(EXPO-step)*1.0)-1)
//--step 1 is special case.
/* if((tx!=(blockColumn-1))&&(ty!=(blockRow-1))) -- this is very important: branch divergence
happens here; need to figure out how exactly CUDA handles it. */
/*****calculate A******************/
int helper1=helper11;
int helper2=helper22;
int helper4=helper44;
int flag=0;//special for step1.
if(step==1)
{
helper1=0;
helper2=0;
helper4=0;
flag=1;
}
int helper3=ty*blockColumn+tx+1;
if(helper3<=(pow(2.0,1.0*(EXPO-step))-1.0))
{
float ahelperfora1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperfora2=deviceA[-step+helper1-helper4+2*(helper3)-1];
float bhelperfora1=deviceB[-step+helper1-helper4+2*(helper3)-1];
deviceA[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*(ahelperfora1)*ahelperfora2/bhelperfora1;
//*****calculate C******************/
float chelperforc1=deviceC[-step+helper1-helper4+2*(helper3)];
float chelperforc2=deviceC[-step+helper1-helper4+2*(helper3)+1];
float bhelperforc1=deviceB[-step+helper1-helper4+2*(helper3)+1];
deviceC[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*chelperforc1*chelperforc2/bhelperforc1;
//calculate B***********************************************//
float bhelperforb1=deviceB[-step+helper1-helper4+2*(helper3)];
float bhelperforb2=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperforb3=deviceB[-step+helper1-helper4+2*(helper3)+1];
float ahelperforb1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperforb2=deviceA[-step+helper1-helper4+2*(helper3)+1];
float chelperforb1=deviceC[-step+helper1-helper4+2*(helper3)-1];
float chelperforb2=deviceC[-step+helper1-helper4+2*(helper3)];
deviceB[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=bhelperforb1-ahelperforb1/bhelperforb2*chelperforb1-chelperforb2/bhelperforb3*ahelperforb2;
//calculate D***************************************************//
float dhelperford1=deviceD[-step+helper1-helper4+2*(helper3)];
float dhelperford2=deviceD[-step+helper1-helper4+2*(helper3)-1];
float dhelperford3=deviceD[-step+helper1-helper4+2*(helper3)+1];
float ahelperford1=deviceA[-step+helper1-helper4+2*(helper3)];
float bhelperford1=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperford2=deviceB[-step+helper1-helper4+2*(helper3)+1];
float chelperford1=deviceC[-step+helper1-helper4+2*(helper3)];
deviceD[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=dhelperford1-ahelperford1/bhelperford1*dhelperford2-chelperford1/bhelperford2*dhelperford3;
}
__syncthreads();
}
int main()
{
int m=pow(2,EXPO)-1; //think of an example with n=3: then m will be 7 here
/*printf("m value is %d",m);*/
int b=1;
int a=0;
float delta=(b-a)*1.0/(m+1.0); //this is correct; think of m as the number of inner grid points
float *A;
float *B;
float *C;
float *D;
float *FinalX;
//by careful calculation, we figure out we need (2^n-1)*2 entries:
//the original step needs to store 2^n-1 values, then step 1 needs 2^(n-1)-1 values, and the last one will be 2^1-1 values,
//so the chunk size will be (2^n-1)+(2^(n-1)-1)+....+(2-1).
//int chunkLength=(pow(2,EXPO)-1)*2;
//add one for the extra thread that is never going to be used, so that it will not index out of bounds
int finalLengthX=(int)pow(2,EXPO)+1;
int chunkLength=(pow(2,EXPO)-1)*2+1;
int chunkSize=chunkLength*sizeof(float);
A=(float*)malloc(chunkSize);
B=(float*)malloc(chunkSize);
C=(float*)malloc(chunkSize);
D=(float*)malloc(chunkSize);
FinalX=(float*)malloc(finalLengthX*sizeof(float));
A[0]=0;
//int vectorLength=EXPO*m;
for(int i=1;i<m;i++)
{
A[i]=1-delta*delta*0.5*(i+1);
}
//else will be 0
for(int i=m;i<chunkLength;i++)
{
A[i]=0;
}
for(int i=0;i<m;i++)
{
B[i]=-2+delta*delta*1.0;
}
for(int i=m;i<chunkLength;i++)
{
B[i]=0;
}
C[m-1]=0;
for(int i=0;i<m-1;i++)
{
C[i]=1+0.5*delta*delta*(i+1);
}
for(int i=m;i<chunkLength;i++)
{
C[i]=0;
}
/* D[0]=2*delta*delta*delta+0.5*delta*delta-1;*/
for(int i=0;i<m-1;i++)
{
D[i]=2*(i+1)*pow(delta,3);
}
D[m-1]=2*m*delta*delta*delta-1+3.5*delta*delta;
for(int i=m;i<chunkLength;i++)
{
D[i]=0;
}
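/* At this point the first m entries of A, B, C, D hold one tridiagonal system
   A[i]*x[i-1] + B[i]*x[i] + C[i]*x[i+1] = D[i] with m = 2^EXPO - 1 unknowns;
   the zero padding that follows leaves room for the smaller reduced systems
   that each forward step of cyclic reduction appends to the same flat arrays.
   The coefficients look like a central-difference discretization of a linear
   two-point boundary-value problem on (0,1) with mesh width delta, but that
   is an inference from the code, not something the author states. */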
clock_t begin,end;
begin=clock();
//so need to set up different grid dimensions for different values of j;
//as j increases the number of threads in use will decrease.
//dim3 dimGrid(1,4); //so we have 4 blocks; each block will be in charge of a,b,c,d respectively.
//http://stackoverflow.com/questions/5029920/how-to-use-2d-arrays-in-cuda
//according to the above post, the following is the correct way to allocate a 2D array on a CUDA device
/* float *deviceA, *deviceB, *deviceC, *deviceD;
size_t pitch;
hipMallocPitch((void**)&deviceA,&pitch,m*sizeof(float),EXPO);
hipMallocPitch((void**)&deviceB,&pitch,m*sizeof(float),EXPO);
hipMallocPitch((void**)&deviceC,&pitch,m*sizeof(float),EXPO);
hipMallocPitch((void**)&deviceD,&pitch,m*sizeof(float),EXPO);*/
float *deviceA, *deviceB, *deviceC, *deviceD,*deviceFinalX;
hipMalloc((void**)&deviceA,chunkSize);
hipMalloc((void**)&deviceB,chunkSize);
hipMalloc((void**)&deviceC,chunkSize);
hipMalloc((void**)&deviceD,chunkSize);
hipMalloc((void**)&deviceFinalX,finalLengthX*sizeof(float));
//copy the host vector to device.
hipMemcpy(deviceA,A,chunkSize,hipMemcpyHostToDevice);
hipMemcpy(deviceB,B,chunkSize,hipMemcpyHostToDevice);
hipMemcpy(deviceC,C,chunkSize,hipMemcpyHostToDevice);
hipMemcpy(deviceD,D,chunkSize,hipMemcpyHostToDevice);
//deviceA, deviceB, deviceC, deviceD are designed to live in CUDA global memory.
//forward
for(int j=1;j<EXPO;j++)
{
//the block size should change; the jth step needs 2^(n-j)-1 threads, so the first step will be 3 if n=3
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-j)/2);
//printf("blockrow is :%d \n",blockRow);
int blockColumn=pow(2,EXPO-j-(EXPO-j)/2);
//printf("blockColumn is :%d \n",blockColumn);
dim3 dimBlock(blockColumn,blockRow);
//in each step the number of threads in use decreases; it should be 2^(n-j)-1 in the jth step
hipLaunchKernelGGL(( CalculatePArrayKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, j,blockRow,blockColumn,deviceA,deviceB,deviceC,deviceD);
}
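/* Launch-geometry arithmetic for the loop above with EXPO=7: at j=1,
   blockRow = 2^((7-1)/2) = 8 and blockColumn = 2^(6-3) = 8, i.e. a single
   8x8 block of 64 threads of which 2^(7-1)-1 = 63 pass the guard inside the
   kernel; by j=6 only a 2x1 block remains. Each step is a separate kernel
   launch on the default stream, which is what orders the steps. */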
//backward
//copy the device vector to host
hipMemcpy(A,deviceA,chunkSize,hipMemcpyDeviceToHost);
hipMemcpy(B,deviceB,chunkSize,hipMemcpyDeviceToHost);
hipMemcpy(C,deviceC,chunkSize,hipMemcpyDeviceToHost);
hipMemcpy(D,deviceD,chunkSize,hipMemcpyDeviceToHost);
int lastIndex=(int)pow(2,EXPO+1)-EXPO-3;
float initialValue=D[lastIndex]/B[lastIndex];
FinalX[0]=0;
FinalX[(int)pow(2,EXPO-1)]=initialValue;
printf("the value in the middle is: %f and this suppose to close to 0.5 when n goes big! \n",FinalX[(int)pow(2,EXPO-1)]);
hipMemcpy(deviceFinalX,FinalX,finalLengthX*sizeof(float),hipMemcpyHostToDevice);
for(int k=EXPO-1;k>=1;k--)
{
//so at most 2^(n-k) variables will be covered in this step!
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-k)/2);
int blockColumn=pow(2,EXPO-k-(EXPO-k)/2);
dim3 dimBlock(blockColumn,blockRow);
hipLaunchKernelGGL(( BackwardKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, k,blockRow,blockColumn,deviceA,deviceB,deviceC,deviceD,deviceFinalX,initialValue);
}
hipMemcpy(FinalX,deviceFinalX,finalLengthX*sizeof(float),hipMemcpyDeviceToHost);
printf(" \n");
printf(" A \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",A[i]);
}
printf(" \n");
printf(" B \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",B[i]);
}
printf(" \n");
printf(" C \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",C[i]);
}
printf(" \n");
printf(" D \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",D[i]);
}
double time_spent;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\n the following are the solutions.");
for(int i=0;i<finalLengthX;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",FinalX[i]);
}
printf("\n time used to calculate this is :%f seconds \n",time_spent);
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
hipFree(deviceD);
hipFree(deviceFinalX);
free(A);
free(B);
free(C);
free(D);
free(FinalX);
return 0;
}
| d5d769dbd9f046fecfa4d563075b97bb6ca1d619.cu | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define EXPO 7
//the right way to add the CUDA driver if you have a GPU:
//http://askubuntu.com/questions/451221/ubuntu-14-04-install-nvidia-driver
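/* None of the CUDA API calls below check their return status. A small helper
   macro such as the following (an illustrative addition, not part of the
   original program; nvcc pulls in the runtime API automatically for .cu
   files) makes allocation/copy failures visible, e.g.
   CUDA_CHECK(cudaMalloc(...)): */
#define CUDA_CHECK(call)                                                 \
    do {                                                                 \
        cudaError_t err_ = (call);                                       \
        if (err_ != cudaSuccess) {                                       \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                  \
                    cudaGetErrorString(err_), __FILE__, __LINE__);       \
        }                                                                \
    } while (0)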
__global__ void BackwardKernel(int k,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD,float* deviceFinalX,float initialValue)
{
int bx1=blockIdx.x;
int by1=blockIdx.y;
int tx1=threadIdx.x;
int ty1=threadIdx.y;
//printf("inside of kernle %f \n",deviceFinalX[4]);
int backhelper1=ty1*blockColumn+tx1+1;
int backhelper2=2*backhelper1-1;//(int((2*backhelper1-1)*pow(2.0,1.0*(k-1))))/(int)(pow(2.0,(k-1)*1.0));
int backhelper3=(int)pow(2.0,(EXPO+1)*1.0);
int backhelper4=(int)pow(2.0,(EXPO-k+2)*1.0);
int h=(int)(pow(2.0,1.0*(k-1)));
float backhelperd=deviceD[-k+backhelper3-backhelper4+backhelper2];
float backhelpera=deviceA[-k+backhelper3-backhelper4+backhelper2];
float backhelperb=deviceB[-k+backhelper3-backhelper4+backhelper2];
float backhelperc=deviceC[-k+backhelper3-backhelper4+backhelper2];
int xindex1=backhelper2*pow(2.0,1.0*(k-1))-h;
int xindex2=backhelper2*pow(2.0,1.0*(k-1))+h;
//so thread i will be in charge of (2i-1)*2^(k-1) calculation
//printf("%d ",int((2*backhelper1-1)*pow(2.0,1.0*(k-1))));
deviceFinalX[(int)(backhelper2*pow(2.0,1.0*(k-1)))]=(backhelperd-backhelpera*deviceFinalX[xindex1]-backhelperc*deviceFinalX[xindex2])*1.0/backhelperb;
__syncthreads();
}
//this is the kernel to calculate the P=(a,b,c,d)
//need to pass in the step which is j, and then figure out which thread to work on
//the calculation in (2^j,2*2^j,3*2^j....)
__global__ void CalculatePArrayKernel(int step,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD)
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int helper11=pow(2.0,(EXPO+1)*1.0);
int helper22=pow(2.0,(EXPO-step+1)*1.0);
int helper44=pow(2.0,(EXPO-step+2)*1.0);
int helper33=pow(2.0,EXPO*1.0)-1;
//printf("step is running: %d \n",step);
// if(helper3<pow(2.0,(EXPO-step)*1.0)-1)
//--step 1 is special case.
/* if((tx!=(blockColumn-1))&&(ty!=(blockRow-1))) -- this is very important: branch divergence
happens here; need to figure out how exactly CUDA handles it. */
/*****calculate A******************/
int helper1=helper11;
int helper2=helper22;
int helper4=helper44;
int flag=0;//special for step1.
if(step==1)
{
helper1=0;
helper2=0;
helper4=0;
flag=1;
}
int helper3=ty*blockColumn+tx+1;
if(helper3<=(pow(2.0,1.0*(EXPO-step))-1.0))
{
float ahelperfora1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperfora2=deviceA[-step+helper1-helper4+2*(helper3)-1];
float bhelperfora1=deviceB[-step+helper1-helper4+2*(helper3)-1];
deviceA[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*(ahelperfora1)*ahelperfora2/bhelperfora1;
//*****calculate C******************/
float chelperforc1=deviceC[-step+helper1-helper4+2*(helper3)];
float chelperforc2=deviceC[-step+helper1-helper4+2*(helper3)+1];
float bhelperforc1=deviceB[-step+helper1-helper4+2*(helper3)+1];
deviceC[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*chelperforc1*chelperforc2/bhelperforc1;
//calculate B***********************************************//
float bhelperforb1=deviceB[-step+helper1-helper4+2*(helper3)];
float bhelperforb2=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperforb3=deviceB[-step+helper1-helper4+2*(helper3)+1];
float ahelperforb1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperforb2=deviceA[-step+helper1-helper4+2*(helper3)+1];
float chelperforb1=deviceC[-step+helper1-helper4+2*(helper3)-1];
float chelperforb2=deviceC[-step+helper1-helper4+2*(helper3)];
deviceB[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=bhelperforb1-ahelperforb1/bhelperforb2*chelperforb1-chelperforb2/bhelperforb3*ahelperforb2;
//calculate D***************************************************//
float dhelperford1=deviceD[-step+helper1-helper4+2*(helper3)];
float dhelperford2=deviceD[-step+helper1-helper4+2*(helper3)-1];
float dhelperford3=deviceD[-step+helper1-helper4+2*(helper3)+1];
float ahelperford1=deviceA[-step+helper1-helper4+2*(helper3)];
float bhelperford1=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperford2=deviceB[-step+helper1-helper4+2*(helper3)+1];
float chelperford1=deviceC[-step+helper1-helper4+2*(helper3)];
deviceD[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=dhelperford1-ahelperford1/bhelperford1*dhelperford2-chelperford1/bhelperford2*dhelperford3;
}
__syncthreads();
}
int main()
{
int m=pow(2,EXPO)-1; //think of an example with n=3: then m will be 7 here
/*printf("m value is %d",m);*/
int b=1;
int a=0;
float delta=(b-a)*1.0/(m+1.0); //this is correct; think of m as the number of inner grid points
float *A;
float *B;
float *C;
float *D;
float *FinalX;
//by careful calculation, we figure out we need (2^n-1)*2 entries:
//the original step needs to store 2^n-1 values, then step 1 needs 2^(n-1)-1 values, and the last one will be 2^1-1 values,
//so the chunk size will be (2^n-1)+(2^(n-1)-1)+....+(2-1).
//int chunkLength=(pow(2,EXPO)-1)*2;
//add one for the extra thread that is never going to be used, so that it will not index out of bounds
int finalLengthX=(int)pow(2,EXPO)+1;
int chunkLength=(pow(2,EXPO)-1)*2+1;
int chunkSize=chunkLength*sizeof(float);
A=(float*)malloc(chunkSize);
B=(float*)malloc(chunkSize);
C=(float*)malloc(chunkSize);
D=(float*)malloc(chunkSize);
FinalX=(float*)malloc(finalLengthX*sizeof(float));
A[0]=0;
//int vectorLength=EXPO*m;
for(int i=1;i<m;i++)
{
A[i]=1-delta*delta*0.5*(i+1);
}
//else will be 0
for(int i=m;i<chunkLength;i++)
{
A[i]=0;
}
for(int i=0;i<m;i++)
{
B[i]=-2+delta*delta*1.0;
}
for(int i=m;i<chunkLength;i++)
{
B[i]=0;
}
C[m-1]=0;
for(int i=0;i<m-1;i++)
{
C[i]=1+0.5*delta*delta*(i+1);
}
for(int i=m;i<chunkLength;i++)
{
C[i]=0;
}
/* D[0]=2*delta*delta*delta+0.5*delta*delta-1;*/
for(int i=0;i<m-1;i++)
{
D[i]=2*(i+1)*pow(delta,3);
}
D[m-1]=2*m*delta*delta*delta-1+3.5*delta*delta;
for(int i=m;i<chunkLength;i++)
{
D[i]=0;
}
clock_t begin,end;
begin=clock();
//so need to set up different grid dimensions for different values of j;
//as j increases the number of threads in use will decrease.
//dim3 dimGrid(1,4); //so we have 4 blocks; each block will be in charge of a,b,c,d respectively.
//http://stackoverflow.com/questions/5029920/how-to-use-2d-arrays-in-cuda
//according to the above post, the following is the correct way to allocate a 2D array on a CUDA device
/* float *deviceA, *deviceB, *deviceC, *deviceD;
size_t pitch;
cudaMallocPitch((void**)&deviceA,&pitch,m*sizeof(float),EXPO);
cudaMallocPitch((void**)&deviceB,&pitch,m*sizeof(float),EXPO);
cudaMallocPitch((void**)&deviceC,&pitch,m*sizeof(float),EXPO);
cudaMallocPitch((void**)&deviceD,&pitch,m*sizeof(float),EXPO);*/
float *deviceA, *deviceB, *deviceC, *deviceD,*deviceFinalX;
cudaMalloc((void**)&deviceA,chunkSize);
cudaMalloc((void**)&deviceB,chunkSize);
cudaMalloc((void**)&deviceC,chunkSize);
cudaMalloc((void**)&deviceD,chunkSize);
cudaMalloc((void**)&deviceFinalX,finalLengthX*sizeof(float));
//copy the host vector to device.
cudaMemcpy(deviceA,A,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,B,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceC,C,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceD,D,chunkSize,cudaMemcpyHostToDevice);
//deviceA, deviceB, deviceC, deviceD are designed to live in CUDA global memory.
//forward
for(int j=1;j<EXPO;j++)
{
//the block size should change; the jth step needs 2^(n-j)-1 threads, so the first step will be 3 if n=3
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-j)/2);
//printf("blockrow is :%d \n",blockRow);
int blockColumn=pow(2,EXPO-j-(EXPO-j)/2);
//printf("blockColumn is :%d \n",blockColumn);
dim3 dimBlock(blockColumn,blockRow);
//in each step the number of threads in use decreases; it should be 2^(n-j)-1 in the jth step
CalculatePArrayKernel<<<dimGrid,dimBlock>>>(j,blockRow,blockColumn,deviceA,deviceB,deviceC,deviceD);
}
//backward
//copy the device vector to host
cudaMemcpy(A,deviceA,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(B,deviceB,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(C,deviceC,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(D,deviceD,chunkSize,cudaMemcpyDeviceToHost);
int lastIndex=(int)pow(2,EXPO+1)-EXPO-3;
float initialValue=D[lastIndex]/B[lastIndex];
FinalX[0]=0;
FinalX[(int)pow(2,EXPO-1)]=initialValue;
printf("the value in the middle is: %f and this suppose to close to 0.5 when n goes big! \n",FinalX[(int)pow(2,EXPO-1)]);
cudaMemcpy(deviceFinalX,FinalX,finalLengthX*sizeof(float),cudaMemcpyHostToDevice);
for(int k=EXPO-1;k>=1;k--)
{
//so at most 2^(n-k) variables will be covered in this step!
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-k)/2);
int blockColumn=pow(2,EXPO-k-(EXPO-k)/2);
dim3 dimBlock(blockColumn,blockRow);
BackwardKernel<<<dimGrid,dimBlock>>>(k,blockRow,blockColumn,deviceA,deviceB,deviceC,deviceD,deviceFinalX,initialValue);
}
cudaMemcpy(FinalX,deviceFinalX,finalLengthX*sizeof(float),cudaMemcpyDeviceToHost);
printf(" \n");
printf(" A \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",A[i]);
}
printf(" \n");
printf(" B \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",B[i]);
}
printf(" \n");
printf(" C \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",C[i]);
}
printf(" \n");
printf(" D \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",D[i]);
}
double time_spent;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\n the following are the solutions.");
for(int i=0;i<finalLengthX;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",FinalX[i]);
}
printf("\n time used to calculate this is :%f seconds \n",time_spent);
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
cudaFree(deviceD);
cudaFree(deviceFinalX);
free(A);
free(B);
free(C);
free(D);
free(FinalX);
return 0;
}
|
fcf4cb8ffc891b528c2185c8499fc736b025d828.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cstdio>
#include <libsgm.h>
#include "winner_takes_all.hpp"
#include "utility.hpp"
namespace sgm {
namespace {
static constexpr unsigned int WARPS_PER_BLOCK = 8u;
static constexpr unsigned int BLOCK_SIZE = WARPS_PER_BLOCK * WARP_SIZE;
__device__ inline uint32_t pack_cost_index(uint32_t cost, uint32_t index){
union {
uint32_t uint32;
ushort2 uint16x2;
} u;
u.uint16x2.x = static_cast<uint16_t>(index);
u.uint16x2.y = static_cast<uint16_t>(cost);
return u.uint32;
}
__device__ uint32_t unpack_cost(uint32_t packed){
return packed >> 16;
}
__device__ int unpack_index(uint32_t packed){
return packed & 0xffffu;
}
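/* Packing trick implemented by pack_cost_index above: the cost lives in the
   upper 16 bits and the disparity index in the lower 16 bits, so an ordinary
   integer min() over packed values selects the minimum cost and breaks ties
   toward the smaller disparity. For example,
   pack_cost_index(5,3)=0x00050003 < pack_cost_index(5,7)=0x00050007
   < pack_cost_index(6,0)=0x00060000. The union layout assumes a
   little-endian target, which holds on the GPUs this code targets. */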
using ComputeDisparity = uint32_t(*)(uint32_t, uint32_t, uint16_t*);
__device__ inline uint32_t compute_disparity_normal(uint32_t disp, uint32_t cost = 0, uint16_t* smem = nullptr)
{
return disp;
}
template <size_t MAX_DISPARITY>
__device__ inline uint32_t compute_disparity_subpixel(uint32_t disp, uint32_t cost, uint16_t* smem)
{
int subp = disp;
subp <<= sgm::StereoSGM::SUBPIXEL_SHIFT;
if (disp > 0 && disp < MAX_DISPARITY - 1) {
const int left = smem[disp - 1];
const int right = smem[disp + 1];
const int numer = left - right;
const int denom = left - 2 * cost + right;
subp += ((numer << sgm::StereoSGM::SUBPIXEL_SHIFT) + denom) / (2 * denom);
}
return subp;
}
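/* The subpixel refinement above is the usual parabola fit through the costs
   at disp-1, disp and disp+1: writing l = smem[disp-1], r = smem[disp+1] and
   c = cost, the minimum of the interpolating parabola lies at
       disp + (l - r) / (2 * (l - 2c + r)),
   and the code evaluates this in fixed point with SUBPIXEL_SHIFT fractional
   bits (the "+ denom" term rounds the quotient to nearest for positive
   denominators). */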
template <unsigned int MAX_DISPARITY, unsigned int NUM_PATHS, ComputeDisparity compute_disparity = compute_disparity_normal>
__global__ void winner_takes_all_kernel(
output_type *left_dest,
output_type *right_dest,
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness)
{
static const unsigned int ACCUMULATION_PER_THREAD = 16u;
static const unsigned int REDUCTION_PER_THREAD = MAX_DISPARITY / WARP_SIZE;
static const unsigned int ACCUMULATION_INTERVAL = ACCUMULATION_PER_THREAD / REDUCTION_PER_THREAD;
static const unsigned int UNROLL_DEPTH =
(REDUCTION_PER_THREAD > ACCUMULATION_INTERVAL)
? REDUCTION_PER_THREAD
: ACCUMULATION_INTERVAL;
const unsigned int cost_step = MAX_DISPARITY * width * height;
const unsigned int warp_id = threadIdx.x / WARP_SIZE;
const unsigned int lane_id = threadIdx.x % WARP_SIZE;
const unsigned int y = blockIdx.x * WARPS_PER_BLOCK + warp_id;
src += y * MAX_DISPARITY * width;
left_dest += y * pitch;
right_dest += y * pitch;
if(y >= height){
return;
}
__shared__ uint16_t smem_cost_sum[WARPS_PER_BLOCK][ACCUMULATION_INTERVAL][MAX_DISPARITY];
uint32_t right_best[REDUCTION_PER_THREAD];
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
right_best[i] = 0xffffffffu;
}
for(unsigned int x0 = 0; x0 < width; x0 += UNROLL_DEPTH){
#pragma unroll
for(unsigned int x1 = 0; x1 < UNROLL_DEPTH; ++x1){
if(x1 % ACCUMULATION_INTERVAL == 0){
const unsigned int k = lane_id * ACCUMULATION_PER_THREAD;
const unsigned int k_hi = k / MAX_DISPARITY;
const unsigned int k_lo = k % MAX_DISPARITY;
const unsigned int x = x0 + x1 + k_hi;
if(x < width){
const unsigned int offset = x * MAX_DISPARITY + k_lo;
uint32_t sum[ACCUMULATION_PER_THREAD];
for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){
sum[i] = 0;
}
for(unsigned int p = 0; p < NUM_PATHS; ++p){
uint32_t load_buffer[ACCUMULATION_PER_THREAD];
load_uint8_vector<ACCUMULATION_PER_THREAD>(
load_buffer, &src[p * cost_step + offset]);
for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){
sum[i] += load_buffer[i];
}
}
store_uint16_vector<ACCUMULATION_PER_THREAD>(
&smem_cost_sum[warp_id][k_hi][k_lo], sum);
}
#if TORCH_HIP_VERSION >= 9000
__syncwarp();
#else
__threadfence_block();
#endif
}
const unsigned int x = x0 + x1;
if(x < width){
// Load sum of costs
const unsigned int smem_x = x1 % ACCUMULATION_INTERVAL;
const unsigned int k0 = lane_id * REDUCTION_PER_THREAD;
uint32_t local_cost_sum[REDUCTION_PER_THREAD];
load_uint16_vector<REDUCTION_PER_THREAD>(
local_cost_sum, &smem_cost_sum[warp_id][smem_x][k0]);
// Pack sum of costs and disparity
uint32_t local_packed_cost[REDUCTION_PER_THREAD];
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
local_packed_cost[i] = pack_cost_index(local_cost_sum[i], k0 + i);
}
// Update left
uint32_t best = 0xffffffffu;
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
best = min(best, local_packed_cost[i]);
}
best = subgroup_min<WARP_SIZE>(best, 0xffffffffu);
// Update right
#pragma unroll
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const unsigned int k = lane_id * REDUCTION_PER_THREAD + i;
const int p = static_cast<int>(((x - k) & ~(MAX_DISPARITY - 1)) + k);
const unsigned int d = static_cast<unsigned int>(x - p);
#if TORCH_HIP_VERSION >= 9000
const uint32_t recv = __shfl_sync(0xffffffffu,
local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD],
d / REDUCTION_PER_THREAD,
WARP_SIZE);
#else
const uint32_t recv = __shfl(
local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD],
d / REDUCTION_PER_THREAD,
WARP_SIZE);
#endif
right_best[i] = min(right_best[i], recv);
if(d == MAX_DISPARITY - 1){
if(0 <= p){
right_dest[p] = compute_disparity_normal(unpack_index(right_best[i]));
}
right_best[i] = 0xffffffffu;
}
}
// Resume updating left to avoid execution dependency
const uint32_t bestCost = unpack_cost(best);
const int bestDisp = unpack_index(best);
bool uniq = true;
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const uint32_t x = local_packed_cost[i];
const bool uniq1 = unpack_cost(x) * uniqueness >= bestCost;
const bool uniq2 = abs(unpack_index(x) - bestDisp) <= 1;
uniq &= uniq1 || uniq2;
}
uniq = subgroup_and<WARP_SIZE>(uniq, 0xffffffffu);
if(lane_id == 0){
left_dest[x] = uniq ? compute_disparity(bestDisp, bestCost, smem_cost_sum[warp_id][smem_x]) : INVALID_DISP;
}
}
}
}
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const unsigned int k = lane_id * REDUCTION_PER_THREAD + i;
const int p = static_cast<int>(((width - k) & ~(MAX_DISPARITY - 1)) + k);
if(0 <= p && p < width){
right_dest[p] = compute_disparity_normal(unpack_index(right_best[i]));
}
}
}
template <size_t MAX_DISPARITY>
void enqueue_winner_takes_all(
output_type *left_dest,
output_type *right_dest,
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness,
bool subpixel,
PathType path_type,
hipStream_t stream)
{
const int gdim =
(height + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
if (subpixel && path_type == PathType::SCAN_8PATH) {
hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_subpixel<MAX_DISPARITY>>), dim3(gdim), dim3(bdim), 0, stream,
left_dest, right_dest, src, width, height, pitch, uniqueness);
} else if (subpixel && path_type == PathType::SCAN_4PATH) {
hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_subpixel<MAX_DISPARITY>>), dim3(gdim), dim3(bdim), 0, stream,
left_dest, right_dest, src, width, height, pitch, uniqueness);
} else if (!subpixel && path_type == PathType::SCAN_8PATH) {
hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_normal>), dim3(gdim), dim3(bdim), 0, stream,
left_dest, right_dest, src, width, height, pitch, uniqueness);
} else /* if (!subpixel && path_type == PathType::SCAN_4PATH) */ {
hipLaunchKernelGGL(( winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_normal>), dim3(gdim), dim3(bdim), 0, stream,
left_dest, right_dest, src, width, height, pitch, uniqueness);
}
}
}
template <size_t MAX_DISPARITY>
WinnerTakesAll<MAX_DISPARITY>::WinnerTakesAll()
: m_left_buffer()
, m_right_buffer()
{ }
template <size_t MAX_DISPARITY>
void WinnerTakesAll<MAX_DISPARITY>::enqueue(
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness,
bool subpixel,
PathType path_type,
hipStream_t stream)
{
if(m_left_buffer.size() != static_cast<size_t>(pitch * height)){
m_left_buffer = DeviceBuffer<output_type>(pitch * height);
}
if(m_right_buffer.size() != static_cast<size_t>(pitch * height)){
m_right_buffer = DeviceBuffer<output_type>(pitch * height);
}
enqueue_winner_takes_all<MAX_DISPARITY>(
m_left_buffer.data(),
m_right_buffer.data(),
src,
width,
height,
pitch,
uniqueness,
subpixel,
path_type,
stream);
}
template <size_t MAX_DISPARITY>
void WinnerTakesAll<MAX_DISPARITY>::enqueue(
output_type* left,
output_type* right,
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness,
bool subpixel,
PathType path_type,
hipStream_t stream)
{
enqueue_winner_takes_all<MAX_DISPARITY>(
left,
right,
src,
width,
height,
pitch,
uniqueness,
subpixel,
path_type,
stream);
}
template class WinnerTakesAll< 64>;
template class WinnerTakesAll<128>;
template class WinnerTakesAll<256>;
}
| fcf4cb8ffc891b528c2185c8499fc736b025d828.cu | /*
Copyright 2016 Fixstars Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <cstdio>
#include <libsgm.h>
#include "winner_takes_all.hpp"
#include "utility.hpp"
namespace sgm {
namespace {
static constexpr unsigned int WARPS_PER_BLOCK = 8u;
static constexpr unsigned int BLOCK_SIZE = WARPS_PER_BLOCK * WARP_SIZE;
__device__ inline uint32_t pack_cost_index(uint32_t cost, uint32_t index){
union {
uint32_t uint32;
ushort2 uint16x2;
} u;
u.uint16x2.x = static_cast<uint16_t>(index);
u.uint16x2.y = static_cast<uint16_t>(cost);
return u.uint32;
}
__device__ uint32_t unpack_cost(uint32_t packed){
return packed >> 16;
}
__device__ int unpack_index(uint32_t packed){
return packed & 0xffffu;
}
using ComputeDisparity = uint32_t(*)(uint32_t, uint32_t, uint16_t*);
__device__ inline uint32_t compute_disparity_normal(uint32_t disp, uint32_t cost = 0, uint16_t* smem = nullptr)
{
return disp;
}
template <size_t MAX_DISPARITY>
__device__ inline uint32_t compute_disparity_subpixel(uint32_t disp, uint32_t cost, uint16_t* smem)
{
int subp = disp;
subp <<= sgm::StereoSGM::SUBPIXEL_SHIFT;
if (disp > 0 && disp < MAX_DISPARITY - 1) {
const int left = smem[disp - 1];
const int right = smem[disp + 1];
const int numer = left - right;
const int denom = left - 2 * cost + right;
subp += ((numer << sgm::StereoSGM::SUBPIXEL_SHIFT) + denom) / (2 * denom);
}
return subp;
}
template <unsigned int MAX_DISPARITY, unsigned int NUM_PATHS, ComputeDisparity compute_disparity = compute_disparity_normal>
__global__ void winner_takes_all_kernel(
output_type *left_dest,
output_type *right_dest,
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness)
{
static const unsigned int ACCUMULATION_PER_THREAD = 16u;
static const unsigned int REDUCTION_PER_THREAD = MAX_DISPARITY / WARP_SIZE;
static const unsigned int ACCUMULATION_INTERVAL = ACCUMULATION_PER_THREAD / REDUCTION_PER_THREAD;
static const unsigned int UNROLL_DEPTH =
(REDUCTION_PER_THREAD > ACCUMULATION_INTERVAL)
? REDUCTION_PER_THREAD
: ACCUMULATION_INTERVAL;
const unsigned int cost_step = MAX_DISPARITY * width * height;
const unsigned int warp_id = threadIdx.x / WARP_SIZE;
const unsigned int lane_id = threadIdx.x % WARP_SIZE;
const unsigned int y = blockIdx.x * WARPS_PER_BLOCK + warp_id;
src += y * MAX_DISPARITY * width;
left_dest += y * pitch;
right_dest += y * pitch;
if(y >= height){
return;
}
__shared__ uint16_t smem_cost_sum[WARPS_PER_BLOCK][ACCUMULATION_INTERVAL][MAX_DISPARITY];
uint32_t right_best[REDUCTION_PER_THREAD];
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
right_best[i] = 0xffffffffu;
}
for(unsigned int x0 = 0; x0 < width; x0 += UNROLL_DEPTH){
#pragma unroll
for(unsigned int x1 = 0; x1 < UNROLL_DEPTH; ++x1){
if(x1 % ACCUMULATION_INTERVAL == 0){
const unsigned int k = lane_id * ACCUMULATION_PER_THREAD;
const unsigned int k_hi = k / MAX_DISPARITY;
const unsigned int k_lo = k % MAX_DISPARITY;
const unsigned int x = x0 + x1 + k_hi;
if(x < width){
const unsigned int offset = x * MAX_DISPARITY + k_lo;
uint32_t sum[ACCUMULATION_PER_THREAD];
for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){
sum[i] = 0;
}
for(unsigned int p = 0; p < NUM_PATHS; ++p){
uint32_t load_buffer[ACCUMULATION_PER_THREAD];
load_uint8_vector<ACCUMULATION_PER_THREAD>(
load_buffer, &src[p * cost_step + offset]);
for(unsigned int i = 0; i < ACCUMULATION_PER_THREAD; ++i){
sum[i] += load_buffer[i];
}
}
store_uint16_vector<ACCUMULATION_PER_THREAD>(
&smem_cost_sum[warp_id][k_hi][k_lo], sum);
}
#if CUDA_VERSION >= 9000
__syncwarp();
#else
__threadfence_block();
#endif
}
const unsigned int x = x0 + x1;
if(x < width){
// Load sum of costs
const unsigned int smem_x = x1 % ACCUMULATION_INTERVAL;
const unsigned int k0 = lane_id * REDUCTION_PER_THREAD;
uint32_t local_cost_sum[REDUCTION_PER_THREAD];
load_uint16_vector<REDUCTION_PER_THREAD>(
local_cost_sum, &smem_cost_sum[warp_id][smem_x][k0]);
// Pack sum of costs and disparity
uint32_t local_packed_cost[REDUCTION_PER_THREAD];
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
local_packed_cost[i] = pack_cost_index(local_cost_sum[i], k0 + i);
}
// Update left
uint32_t best = 0xffffffffu;
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
best = min(best, local_packed_cost[i]);
}
best = subgroup_min<WARP_SIZE>(best, 0xffffffffu);
// Update right
#pragma unroll
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const unsigned int k = lane_id * REDUCTION_PER_THREAD + i;
const int p = static_cast<int>(((x - k) & ~(MAX_DISPARITY - 1)) + k);
const unsigned int d = static_cast<unsigned int>(x - p);
#if CUDA_VERSION >= 9000
const uint32_t recv = __shfl_sync(0xffffffffu,
local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD],
d / REDUCTION_PER_THREAD,
WARP_SIZE);
#else
const uint32_t recv = __shfl(
local_packed_cost[(REDUCTION_PER_THREAD - i + x1) % REDUCTION_PER_THREAD],
d / REDUCTION_PER_THREAD,
WARP_SIZE);
#endif
right_best[i] = min(right_best[i], recv);
if(d == MAX_DISPARITY - 1){
if(0 <= p){
right_dest[p] = compute_disparity_normal(unpack_index(right_best[i]));
}
right_best[i] = 0xffffffffu;
}
}
// Resume updating left to avoid execution dependency
const uint32_t bestCost = unpack_cost(best);
const int bestDisp = unpack_index(best);
bool uniq = true;
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const uint32_t x = local_packed_cost[i];
const bool uniq1 = unpack_cost(x) * uniqueness >= bestCost;
const bool uniq2 = abs(unpack_index(x) - bestDisp) <= 1;
uniq &= uniq1 || uniq2;
}
uniq = subgroup_and<WARP_SIZE>(uniq, 0xffffffffu);
if(lane_id == 0){
left_dest[x] = uniq ? compute_disparity(bestDisp, bestCost, smem_cost_sum[warp_id][smem_x]) : INVALID_DISP;
}
}
}
}
for(unsigned int i = 0; i < REDUCTION_PER_THREAD; ++i){
const unsigned int k = lane_id * REDUCTION_PER_THREAD + i;
const int p = static_cast<int>(((width - k) & ~(MAX_DISPARITY - 1)) + k);
if(0 <= p && p < width){
right_dest[p] = compute_disparity_normal(unpack_index(right_best[i]));
}
}
}
template <size_t MAX_DISPARITY>
void enqueue_winner_takes_all(
output_type *left_dest,
output_type *right_dest,
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness,
bool subpixel,
PathType path_type,
cudaStream_t stream)
{
const int gdim =
(height + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK;
const int bdim = BLOCK_SIZE;
if (subpixel && path_type == PathType::SCAN_8PATH) {
winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_subpixel<MAX_DISPARITY>><<<gdim, bdim, 0, stream>>>(
left_dest, right_dest, src, width, height, pitch, uniqueness);
} else if (subpixel && path_type == PathType::SCAN_4PATH) {
winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_subpixel<MAX_DISPARITY>><<<gdim, bdim, 0, stream>>>(
left_dest, right_dest, src, width, height, pitch, uniqueness);
} else if (!subpixel && path_type == PathType::SCAN_8PATH) {
winner_takes_all_kernel<MAX_DISPARITY, 8, compute_disparity_normal><<<gdim, bdim, 0, stream>>>(
left_dest, right_dest, src, width, height, pitch, uniqueness);
} else /* if (!subpixel && path_type == PathType::SCAN_4PATH) */ {
winner_takes_all_kernel<MAX_DISPARITY, 4, compute_disparity_normal><<<gdim, bdim, 0, stream>>>(
left_dest, right_dest, src, width, height, pitch, uniqueness);
}
}
}
template <size_t MAX_DISPARITY>
WinnerTakesAll<MAX_DISPARITY>::WinnerTakesAll()
: m_left_buffer()
, m_right_buffer()
{ }
template <size_t MAX_DISPARITY>
void WinnerTakesAll<MAX_DISPARITY>::enqueue(
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness,
bool subpixel,
PathType path_type,
cudaStream_t stream)
{
if(m_left_buffer.size() != static_cast<size_t>(pitch * height)){
m_left_buffer = DeviceBuffer<output_type>(pitch * height);
}
if(m_right_buffer.size() != static_cast<size_t>(pitch * height)){
m_right_buffer = DeviceBuffer<output_type>(pitch * height);
}
enqueue_winner_takes_all<MAX_DISPARITY>(
m_left_buffer.data(),
m_right_buffer.data(),
src,
width,
height,
pitch,
uniqueness,
subpixel,
path_type,
stream);
}
template <size_t MAX_DISPARITY>
void WinnerTakesAll<MAX_DISPARITY>::enqueue(
output_type* left,
output_type* right,
const cost_type *src,
int width,
int height,
int pitch,
float uniqueness,
bool subpixel,
PathType path_type,
cudaStream_t stream)
{
enqueue_winner_takes_all<MAX_DISPARITY>(
left,
right,
src,
width,
height,
pitch,
uniqueness,
subpixel,
path_type,
stream);
}
template class WinnerTakesAll< 64>;
template class WinnerTakesAll<128>;
template class WinnerTakesAll<256>;
}
|
28a647a97352a5f1788707ed0a97a46a3a7aacb9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Matrix multiplication: P = M * N.
* Device code.
Author: Naga Kandasamy
Date: 2/16/2017
*/
#ifndef _MATRIX_MULTIPLY_KERNEL_H_
#define _MATRIX_MULTIPLY_KERNEL_H_
#include "matrix.h"
/* Kernel uses global memory. It exhibits redundant loading of both rows and columns. */
__global__ void matrix_multiply_kernel_naive(float *P, float *M, float *N, int matrix_size)
{
/* Obtain thread index within the thread block */
int threadX = threadIdx.x;
int threadY = threadIdx.y;
/* Obtain block index within the grid */
int blockX = blockIdx.x;
int blockY = blockIdx.y;
/* Find position in matrix */
int column = blockDim.x * blockX + threadX;
int row = blockDim.y * blockY + threadY;
int k;
float P_temp, M_element, N_element;
P_temp = 0;
for (k = 0; k < matrix_size; k++) {
M_element = M[matrix_size * row + k]; /* Row elements */
N_element = N[matrix_size * k + column]; /* Column elements */
P_temp += M_element * N_element;
}
/* Write result to P. */
P[row * matrix_size + column] = P_temp;
return;
}
/* Kernel uses shared memory as the mechanism to reuse data between threads */
__global__ void matrix_multiply_kernel_optimized(float *P, float *M, float *N, int matrix_size)
{
/* Allocate shared memory for thread block */
__shared__ float Msub[TILE_SIZE][TILE_SIZE];
__shared__ float Nsub[TILE_SIZE][TILE_SIZE];
/* Obtain thread index within thread block */
int threadX = threadIdx.x;
int threadY = threadIdx.y;
/* Obtain block index within grid */
int blockX = blockIdx.x;
int blockY = blockIdx.y;
/* Find position in matrix; which is the thread to data mapping. */
int column = blockDim.x * blockX + threadX;
int row = blockDim.y * blockY + threadY;
int k = 0;
float Psub = 0.0f;
while (k < matrix_size) {
/* Check edge conditions for matrix M for this tile */
if (((k + threadX) < matrix_size) && (column < matrix_size))
Msub[threadY][threadX] = M[row * matrix_size + k + threadX];
else
Msub[threadY][threadX] = 0.0f; /* Pad out the shared memory area */
/* Check edge conditions for matrix N for this tile */
if(((k + threadY) < matrix_size) && (row < matrix_size))
Nsub[threadY][threadX] = N[(k + threadY) * matrix_size + column];
else
Nsub[threadY][threadX] = 0.0f;
/* Barrier for threads to wait while shared memory is populated by thread block */
__syncthreads();
/* Multiply row and column entries corresponding to the tile just loaded */
int i;
for (i = 0; i < TILE_SIZE; i++)
Psub += Msub[threadY][i] * Nsub[i][threadX];
__syncthreads();
k += TILE_SIZE;
}
/* Write result to P */
if (column < matrix_size && row < matrix_size)
P[row * matrix_size + column] = Psub;
}
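/* A minimal host-side launch sketch (illustrative only; TILE_SIZE comes from
   matrix.h, and the device pointers P, M, N are assumed to be allocated and
   filled elsewhere):
       dim3 block(TILE_SIZE, TILE_SIZE);
       dim3 grid((matrix_size + TILE_SIZE - 1) / TILE_SIZE,
                 (matrix_size + TILE_SIZE - 1) / TILE_SIZE);
       hipLaunchKernelGGL(matrix_multiply_kernel_optimized, grid, block, 0, 0,
                          P, M, N, matrix_size);
   The same geometry works for the naive kernel, which assumes matrix_size is
   a multiple of the block dimensions since it performs no bounds check. */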
#endif
| 28a647a97352a5f1788707ed0a97a46a3a7aacb9.cu | /* Matrix multiplication: P = M * N.
* Device code.
Author: Naga Kandasamy
Date: 2/16/2017
*/
#ifndef _MATRIX_MULTIPLY_KERNEL_H_
#define _MATRIX_MULTIPLY_KERNEL_H_
#include "matrix.h"
/* Kernel uses global memory. It exhibits redundant loading of both rows and columns. */
__global__ void matrix_multiply_kernel_naive(float *P, float *M, float *N, int matrix_size)
{
/* Obtain thread index within the thread block */
int threadX = threadIdx.x;
int threadY = threadIdx.y;
/* Obtain block index within the grid */
int blockX = blockIdx.x;
int blockY = blockIdx.y;
/* Find position in matrix */
int column = blockDim.x * blockX + threadX;
int row = blockDim.y * blockY + threadY;
int k;
float P_temp, M_element, N_element;
P_temp = 0;
for (k = 0; k < matrix_size; k++) {
M_element = M[matrix_size * row + k]; /* Row elements */
N_element = N[matrix_size * k + column]; /* Column elements */
P_temp += M_element * N_element;
}
/* Write result to P. */
P[row * matrix_size + column] = P_temp;
return;
}
/* Kernel uses shared memory as the mechanism to reuse data between threads */
__global__ void matrix_multiply_kernel_optimized(float *P, float *M, float *N, int matrix_size)
{
/* Allocate shared memory for thread block */
__shared__ float Msub[TILE_SIZE][TILE_SIZE];
__shared__ float Nsub[TILE_SIZE][TILE_SIZE];
/* Obtain thread index within thread block */
int threadX = threadIdx.x;
int threadY = threadIdx.y;
/* Obtain block index within grid */
int blockX = blockIdx.x;
int blockY = blockIdx.y;
/* Find position in matrix; which is the thread to data mapping. */
int column = blockDim.x * blockX + threadX;
int row = blockDim.y * blockY + threadY;
int k = 0;
float Psub = 0.0f;
while (k < matrix_size) {
/* Check edge conditions for matrix M for this tile */
if (((k + threadX) < matrix_size) && (column < matrix_size))
Msub[threadY][threadX] = M[row * matrix_size + k + threadX];
else
Msub[threadY][threadX] = 0.0f; /* Pad out the shared memory area */
/* Check edge conditions for matrix N for this tile */
if(((k + threadY) < matrix_size) && (row < matrix_size))
Nsub[threadY][threadX] = N[(k + threadY) * matrix_size + column];
else
Nsub[threadY][threadX] = 0.0f;
/* Barrier for threads to wait while shared memory is populated by thread block */
__syncthreads();
/* Multiply row and column entries corresponding to the tile just loaded */
int i;
for (i = 0; i < TILE_SIZE; i++)
Psub += Msub[threadY][i] * Nsub[i][threadX];
__syncthreads();
k += TILE_SIZE;
}
/* Write result to P */
if (column < matrix_size && row < matrix_size)
P[row * matrix_size + column] = Psub;
}
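/* A minimal host-side launch sketch (illustrative only; TILE_SIZE comes from
   matrix.h, and the device pointers P, M, N are assumed to be allocated and
   filled elsewhere):
       dim3 block(TILE_SIZE, TILE_SIZE);
       dim3 grid((matrix_size + TILE_SIZE - 1) / TILE_SIZE,
                 (matrix_size + TILE_SIZE - 1) / TILE_SIZE);
       matrix_multiply_kernel_optimized<<<grid, block>>>(P, M, N, matrix_size);
   The same geometry works for the naive kernel, which assumes matrix_size is
   a multiple of the block dimensions since it performs no bounds check. */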
#endif
|
d58efbeb2ffbb5915f97a632efa0f34263f93a20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void threshKernel(unsigned char * image, unsigned char* moddedimage, int size, int threshold)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
{
if (image[i] > threshold)
{
moddedimage[i] = 255;
}
else
{
moddedimage[i] = 0;
}
}
} | d58efbeb2ffbb5915f97a632efa0f34263f93a20.cu | #include "includes.h"
__global__ void threshKernel(unsigned char * image, unsigned char* moddedimage, int size, int threshold)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
{
if (image[i] > threshold)
{
moddedimage[i] = 255;
}
else
{
moddedimage[i] = 0;
}
}
} |
c7fed4a150292c53dd9e53a96243a1f2aa156fa4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/index_put_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cast_kernel.h"
#include "paddle/phi/kernels/funcs/index_put_utils.h"
namespace phi {
template <typename T, size_t Rank>
__global__ void index_put_cuda_kernel(const int64_t N,
const T* x,
const T* vals,
int64_t** indices,
phi::Array<int64_t, Rank> stride,
phi::Array<int64_t, Rank> shape,
int64_t is_single_val_tensor,
bool accumulate,
T* out) {
int64_t idx = threadIdx.x + blockDim.x * blockIdx.x;
int64_t cur_ix = 0;
if (idx >= N) {
return;
}
int64_t offset = 0;
for (int i = 0; i < Rank; ++i) {
cur_ix = (static_cast<int64_t>(*(indices[i] + idx)));
if (cur_ix < 0) {
cur_ix += shape[i];
}
offset += stride[i] * cur_ix;
}
if (accumulate) {
*(out + offset) += *(vals + (idx & is_single_val_tensor));
} else {
*(out + offset) = *(vals + (idx & is_single_val_tensor));
}
}
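/* Note the branch-free broadcast in the kernel above: is_single_val_tensor is
   0 when the value tensor holds a single element and INT64_MAX otherwise, so
   (idx & is_single_val_tensor) reads vals[0] for every thread in the scalar
   case and vals[idx] in the elementwise case. */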
template <typename T, typename Context, size_t Rank>
void LaunchIndexPutCudaKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<const DenseTensor*>& indices,
const DenseTensor& value,
bool accumulate,
DenseTensor* out) {
auto* x_data = x.data<T>();
auto* val_data = value.data<T>();
bool is_initialized = out->initialized();
T* out_data = dev_ctx.template Alloc<T>(out);
if (!is_initialized) {
phi::Copy(dev_ctx, x, dev_ctx.GetPlace(), false, out);
}
auto x_dims = x.dims();
const int64_t numel = indices[0]->numel();
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel);
auto x_stride = phi::stride(x_dims);
phi::Array<int64_t, Rank> stride_a;
phi::Array<int64_t, Rank> shape_a;
for (size_t idx = 0; idx < Rank; ++idx) {
stride_a[idx] = x_stride[idx];
shape_a[idx] = x_dims[idx];
}
int64_t is_single_val_tensor = (value.numel() == 1) ? 0 : INT64_MAX;
auto pd_indices =
funcs::GetDevicePointerArray<int64_t, Context>(dev_ctx, indices);
hipLaunchKernelGGL(( index_put_cuda_kernel<T, Rank>)
, dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(),
numel,
x_data,
val_data,
pd_indices,
stride_a,
shape_a,
is_single_val_tensor,
accumulate,
out_data);
}
template <typename T, typename Context>
void IndexPutKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<const DenseTensor*>& indices,
const DenseTensor& value,
bool accumulate,
DenseTensor* out) {
PADDLE_ENFORCE_EQ(
x.dtype(),
value.dtype(),
phi::errors::InvalidArgument(
"The data type of tensor value must be same to the data type "
"of tensor x."));
PADDLE_ENFORCE_EQ(indices.empty(),
false,
phi::errors::InvalidArgument("Indices cannot be empty."));
std::vector<DenseTensor> tmp_args;
std::vector<const phi::DenseTensor*> int_indices_v =
funcs::DealWithBoolIndices<T, Context>(dev_ctx, indices, &tmp_args);
const size_t total_dims = x.dims().size();
auto bd_dim = funcs::BroadCastTensorsDims(int_indices_v);
std::vector<int64_t> res_dim_v(phi::vectorize(bd_dim));
std::vector<const phi::DenseTensor*> res_indices_v(x.dims().size(), nullptr);
std::vector<DenseTensor> tmp_res_indices_v;
std::vector<DenseTensor> tmp_value_v;
std::vector<DenseTensor> range_tensor_v;
const DenseTensor* ptr_value = nullptr;
for (int i = indices.size(); i < x.dims().size(); ++i) {
range_tensor_v.emplace_back(funcs::GetRangeCudaTensor<int64_t, Context>(
dev_ctx, x.dims()[i], phi::DataType::INT64));
}
funcs::DealWithIndices<T, Context>(dev_ctx,
x,
int_indices_v,
&res_indices_v,
&tmp_res_indices_v,
range_tensor_v,
bd_dim,
&res_dim_v);
if (value.numel() != 1) {
tmp_value_v.emplace_back(
DenseTensor(value.dtype()).Resize(phi::make_ddim(res_dim_v)));
ExpandKernel<T, Context>(
dev_ctx, value, IntArray(res_dim_v), &tmp_value_v[0]);
ptr_value = &tmp_value_v[0];
} else {
ptr_value = &value;
}
switch (total_dims) {
case 1:
LaunchIndexPutCudaKernel<T, Context, 1>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 2:
LaunchIndexPutCudaKernel<T, Context, 2>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 3:
LaunchIndexPutCudaKernel<T, Context, 3>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 4:
LaunchIndexPutCudaKernel<T, Context, 4>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 5:
LaunchIndexPutCudaKernel<T, Context, 5>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 6:
LaunchIndexPutCudaKernel<T, Context, 6>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"dims of input tensor should be less than 7, But received"
"%d",
x.dims().size()));
}
}
} // namespace phi
PD_REGISTER_KERNEL(index_put,
GPU,
ALL_LAYOUT,
phi::IndexPutKernel,
float,
double,
int,
int64_t,
bool,
phi::dtype::float16) {}
| c7fed4a150292c53dd9e53a96243a1f2aa156fa4.cu | // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/index_put_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_launch_config.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cast_kernel.h"
#include "paddle/phi/kernels/funcs/index_put_utils.h"
namespace phi {
template <typename T, size_t Rank>
__global__ void index_put_cuda_kernel(const int64_t N,
const T* x,
const T* vals,
int64_t** indices,
phi::Array<int64_t, Rank> stride,
phi::Array<int64_t, Rank> shape,
int64_t is_single_val_tensor,
bool accumulate,
T* out) {
int64_t idx = threadIdx.x + blockDim.x * blockIdx.x;
int64_t cur_ix = 0;
if (idx >= N) {
return;
}
int64_t offset = 0;
for (int i = 0; i < Rank; ++i) {
cur_ix = (static_cast<int64_t>(*(indices[i] + idx)));
if (cur_ix < 0) {
cur_ix += shape[i];
}
offset += stride[i] * cur_ix;
}
if (accumulate) {
*(out + offset) += *(vals + (idx & is_single_val_tensor));
} else {
*(out + offset) = *(vals + (idx & is_single_val_tensor));
}
}
template <typename T, typename Context, size_t Rank>
void LaunchIndexPutCudaKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<const DenseTensor*>& indices,
const DenseTensor& value,
bool accumulate,
DenseTensor* out) {
auto* x_data = x.data<T>();
auto* val_data = value.data<T>();
bool is_initialized = out->initialized();
T* out_data = dev_ctx.template Alloc<T>(out);
if (!is_initialized) {
phi::Copy(dev_ctx, x, dev_ctx.GetPlace(), false, out);
}
auto x_dims = x.dims();
const int64_t numel = indices[0]->numel();
auto config = phi::backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel);
auto x_stride = phi::stride(x_dims);
phi::Array<int64_t, Rank> stride_a;
phi::Array<int64_t, Rank> shape_a;
for (size_t idx = 0; idx < Rank; ++idx) {
stride_a[idx] = x_stride[idx];
shape_a[idx] = x_dims[idx];
}
int64_t is_single_val_tensor = (value.numel() == 1) ? 0 : INT64_MAX;
auto pd_indices =
funcs::GetDevicePointerArray<int64_t, Context>(dev_ctx, indices);
index_put_cuda_kernel<T, Rank>
<<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(
numel,
x_data,
val_data,
pd_indices,
stride_a,
shape_a,
is_single_val_tensor,
accumulate,
out_data);
}
template <typename T, typename Context>
void IndexPutKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<const DenseTensor*>& indices,
const DenseTensor& value,
bool accumulate,
DenseTensor* out) {
PADDLE_ENFORCE_EQ(
x.dtype(),
value.dtype(),
phi::errors::InvalidArgument(
"The data type of tensor value must be same to the data type "
"of tensor x."));
PADDLE_ENFORCE_EQ(indices.empty(),
false,
phi::errors::InvalidArgument("Indices cannot be empty."));
std::vector<DenseTensor> tmp_args;
std::vector<const phi::DenseTensor*> int_indices_v =
funcs::DealWithBoolIndices<T, Context>(dev_ctx, indices, &tmp_args);
const size_t total_dims = x.dims().size();
auto bd_dim = funcs::BroadCastTensorsDims(int_indices_v);
std::vector<int64_t> res_dim_v(phi::vectorize(bd_dim));
std::vector<const phi::DenseTensor*> res_indices_v(x.dims().size(), nullptr);
std::vector<DenseTensor> tmp_res_indices_v;
std::vector<DenseTensor> tmp_value_v;
std::vector<DenseTensor> range_tensor_v;
const DenseTensor* ptr_value = nullptr;
for (int i = indices.size(); i < x.dims().size(); ++i) {
range_tensor_v.emplace_back(funcs::GetRangeCudaTensor<int64_t, Context>(
dev_ctx, x.dims()[i], phi::DataType::INT64));
}
funcs::DealWithIndices<T, Context>(dev_ctx,
x,
int_indices_v,
&res_indices_v,
&tmp_res_indices_v,
range_tensor_v,
bd_dim,
&res_dim_v);
if (value.numel() != 1) {
tmp_value_v.emplace_back(
DenseTensor(value.dtype()).Resize(phi::make_ddim(res_dim_v)));
ExpandKernel<T, Context>(
dev_ctx, value, IntArray(res_dim_v), &tmp_value_v[0]);
ptr_value = &tmp_value_v[0];
} else {
ptr_value = &value;
}
switch (total_dims) {
case 1:
LaunchIndexPutCudaKernel<T, Context, 1>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 2:
LaunchIndexPutCudaKernel<T, Context, 2>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 3:
LaunchIndexPutCudaKernel<T, Context, 3>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 4:
LaunchIndexPutCudaKernel<T, Context, 4>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 5:
LaunchIndexPutCudaKernel<T, Context, 5>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
case 6:
LaunchIndexPutCudaKernel<T, Context, 6>(
dev_ctx, x, res_indices_v, *ptr_value, accumulate, out);
break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"dims of input tensor should be less than 7, But received"
"%d",
x.dims().size()));
}
}
} // namespace phi
PD_REGISTER_KERNEL(index_put,
GPU,
ALL_LAYOUT,
phi::IndexPutKernel,
float,
double,
int,
int64_t,
bool,
phi::dtype::float16) {}
|
5124365499cfff88aa65f7cc1a42dcb57a40c539.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************************
pos2doppler.c
Takes a pos_t structure and a doppler_t structure and "fills in" a dopfrm_t structure
indexed by frm. In other words, pos2doppler works from a model plane-of-sky image (with
an observer z-coordinate and a scattering angle assigned to each pixel) to produce a model
Doppler spectrum corresponding to data frame frm.
In the case of an orbiting binary system (the "orbit" action), pos2doppler only computes
power contributions from the orbiting body denoted by the "body" parameter: the routine is
called twice, once for each body.
pos2doppler takes contributions only from the rectangular plane-of-sky region defined by
pos.xlim and pos.ylim -- the smallest rectangle which completely "contains" the model in
the plane of the sky. No power is contributed by parts of the model which extend beyond
the POS window; this is why such models are heavily penalized (the objective function is
doubled -- see function f in file bestfit.c).
idoplim is updated for frame frm to show the model Doppler region that contains nonzero
power.
Modified 2016 November 14 by ME:
Implemented an all-GPU version of pos2doppler.
Modified 2015 June 3 by CM:
Implement smearing for the "fit" and "write" actions by adding "v" (view) parameter
and applying each run of pos2doppler to a single view rather than to an entire
(smeared) frame
Modified 2014 February 10 by CM:
Add "ilaw" argument to the radlaw routine to implement multiple radar scattering laws
Modified 2012 March 23 by CM:
Implement Doppler scaling
Modified 2010 September 1 by CM:
Add braces to an if-then-else statement to avoid compilation warning
Modified 2010 August 26 by CM:
For the "map" action, change the "map_forward" parameter to "map_mode"
and implement map_mode = 'facets'
For the "map" action, implement the "map_verbose" parameter
Modified 2010 June 15 by CM:
Pass the entire par_t parameter structure as an argument rather than
just selected parameters
Implement the map action
Modified 2009 November 15 by CM:
Fix argument type in a printf statement
Modified 2009 April 3 by CM:
Add the "warn_badradar" parameter (see below)
If the sinc^2 Doppler response function extends to too many Doppler
bins, set a flag and compute a factor by which the objective
function should be increased (actually the logarithm of this
factor). If the "warn_badradar" parameter is turned on, print an
explicit warning.
If the model is too wide in Doppler space even for the overflow image,
set a flag and compute a factor by which the objective function
should be increased (actually the logarithm of this factor). If
the "warn_badradar" parameter is turned on, print an explicit
warning.
Make pos2doppler int rather than void in order to return the flag
described above to the calling procedure
Modified 2007 August 4 by CM:
Add orbit_xoff, orbit_yoff, and orbit_dopoff parameters, the x offset
(POS image rows), y offset (POS image columns), and Doppler offset
(spectral bins) of the center of mass due to orbital motion.
Add body parameter to indicate (for the "orbit" action) which of the two
orbiting bodies' power contributions should be computed
Add c (component) argument to radlaw routine
Modified 2006 September 14 by CM:
If the overflow region is too small, print a warning rather than
halting the program
Modified 2006 June 21 by CM:
Change dopres to dop_per_bin
For POS renderings, change res to km_per_pixel
Modified 2006 June 18 by CM:
Allow each Doppler frame in a dataset to have different dimensions
after vignetting
Modified 2006 March 10 by CM:
Pass the "speckle" parameter so that self-noise can be included when
computing the chi squared contribution of the overflow region
Compute overflow_xsec and overflow_dopmean so that these quantities
can be used by the "delcorinit" action
Modified 2005 July 25 by CM:
Fix bug in overall cross-section scale factor: return to Scott's scheme
of normalizing the cross-section contributions from a given POS
pixel so that they sum to the cross section actually present on the
sky in that pixel
Modified 2005 July 20 by CM:
Fix bug in computing floating-point Doppler limits in Hz
Add "facet" argument to radlaw routine
Modified 2005 July 5 by CM:
Eliminate "dir" argument (since we always add power to the model image
and never subtract it)
Add "set" (set number) argument in order to improve error messages
Modified 2005 June 27 by CM:
Rename INFINITY constant to HUGENUMBER to avoid conflicts
Modified 2005 June 25 by CM:
Rename old "doplim" to "idoplim"; this is the Doppler limits in
(integer) bin numbers
Add new "doplim" which is the floating-point Doppler limits in Hz,
obtained PRIOR to convolution with the Doppler response function
Modified 2005 January 25 by CM:
Take care of uninitialized variable
Modified 2003 May 11 by CM:
Compute contributions to chi squared by model power which lies
outside the limits of the data frame.
Modified 2003 May 5 by CM:
For each POS pixel, compute the entire pixel's contribution to a
given Doppler bin in the model spectrum so long as even one point
at which we evaluate the sinc^2 response function is less than
sinc2width/2.0 bins away from that Doppler bin. In other words,
err on the side of computing too many small contributions to each
bin in the model spectrum, so as not to omit significant contributions
just because a POS pixel's *center* isn't close enough in Doppler.
Modified 2003 April 29 by CM:
Evaluate the sinc^2 Doppler response function at nsinc2 points
per POS pixel dimension, not just at the pixel center.
The sinc^2 function varies rapidly -- one null per Doppler bin
away from the central peak -- so if the pixel width is more than
about half the Doppler resolution, we want to take the mean of
several points within the pixel.
Modified 2003 April 26 by CM:
Zero out the sinc^2 Doppler response function beyond the
nearest sinc2width bins rather than beyond the nearest 2 bins
Modified 2003 April 17 by CM:
Now correctly scales the model Doppler spectrum to account for
Doppler mismatching
*****************************************************************************************/
extern "C" {
#include "head.h"
}
/* Declare __device__ vars and structs, which have file scope */
__device__ int afdop_nsinc2_sq, afdop_any_overflow, afdop_in_bounds,
afdop_badradar;
/* Note that both pos2deldop_cuda.cu and posvis_cuda.cu have the atomicMaxf
* and atomicMinf device functions defined separately. This is done due to
 * the way static device functions are handled, I guess. I tried putting them
 * into a separate file with a declaration in the shape-cuda.h header file,
* but to no avail. So here they are, duplicated in both files. */
__device__ static float atomicMaxf(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMinf(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__global__ void pos2doppler_init_af_krnl(struct dat_t *ddat, int set, int v,
struct dopfrm_t **frame, struct pos_t **pos, int nframes, int *ndop,
int *idop0, float3 *w, float2 *doplim, int4 *xylim) {
/* nframes-threaded kernel */
int frm = threadIdx.x;
if (frm < nframes) {
/* Initialize variables */
idop0[frm] = 0;
afdop_any_overflow = 0;
frame[frm] = &ddat->set[set].desc.doppler.frame[frm];
pos[frm] = &frame[frm]->pos;
ndop[frm] = frame[frm]->ndop;
frame[frm]->idoplim[0] = ndop[frm] + 999999;
frame[frm]->idoplim[1] = -999999;
frame[frm]->doplim[0] = HUGENUMBER;
frame[frm]->doplim[1] = -HUGENUMBER;
afdop_badradar = 0;
frame[frm]->badradar_logfactor = 0.0;
/* Get w, the apparent spin vector in observer coordinates */
dev_cotrans4(w, frame[frm]->view[v].oe, frame[frm]->view[v].spin, 1, frm);
/* Copy frame->doplim over to the float device variable */
doplim[frm].x = frame[frm]->doplim[0];
doplim[frm].y = frame[frm]->doplim[1];
/* Now get pos->xlim[0], pos->xlim[1], pos->ylim[0], pos->ylim[1] */
xylim[frm].w = pos[frm]->xlim[0];
xylim[frm].x = pos[frm]->xlim[1];
xylim[frm].y = pos[frm]->ylim[0];
xylim[frm].z = pos[frm]->ylim[1];
}
}
__global__ void pos2doppler_radar_parameters_af_krnl(struct par_t *dpar,
struct dat_t *ddat, struct dopfrm_t **frame, struct pos_t **pos,
double orbit_dopoff, int set, int nframes, int v, float2 *axay,
float2 *xyincr, float3 *w, float4 *dop, float *dopshift) {
/* nframes-threaded kernel */
int frm = threadIdx.x;
double dopfact;
if (frm < nframes) {
/* Compute the Doppler bin increment per plane-of-sky pixel westward (ax)
and northward (ay); these values are scaled by the "dopscale" parameter
for this dataset. Then compute km2Hz, the Doppler increment (Hz) per
km perpendicular to the projected spin axis in the plane of the sky. */
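	/* (For radar, the Doppler shift of an echo is 2*v*Ftx/c for line-of-sight
	 * velocity v; KM2HZFACT is assumed to bundle the 2/c factor and the
	 * remaining unit conversions.) */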
dopfact = ddat->set[set].desc.doppler.dopscale.val * KM2HZFACT * pos[0]->km_per_pixel
* ddat->set[set].desc.doppler.Ftx / ddat->set[set].desc.doppler.dop_per_bin;
axay[frm].x = -w[frm].y*dopfact;
axay[frm].y = w[frm].x*dopfact;
frame[frm]->view[v].km2Hz = sqrt(axay[frm].x*axay[frm].x +
axay[frm].y*axay[frm].y) * ddat->set[set].desc.doppler.dop_per_bin
/ pos[frm]->km_per_pixel;
/* Compute absolute value of the difference between maximum (or minimum)
* Doppler on any given POS pixel's edge and the Doppler at its center */
/* dop.w - dopdiff_bl
* dop.x - dopdiff_max
* dop.y - dopDC_vig
* dop.z - dop_extra */
if (w[frm].x != 0.0 || w[frm].y != 0.0)
dop[frm].z = frame[frm]->view[v].km2Hz * 0.5 * pos[frm]->km_per_pixel
* sqrt(w[frm].x*w[frm].x + w[frm].y*w[frm].y) /
MAX( fabs(w[frm].x), fabs(w[frm].y));
else
dop[frm].z = 0.0;
/* We may be evaluating the sinc^2 Doppler response function at
more than one point per POS pixel. xincr and yincr are the
Doppler bin increments between adjacent evaluation points in the
x and y directions. dopdiff_bl is the Doppler bin difference
between the bottom-leftmost (southeasternmost) evaluation point
and the pixel center. dopdiff_max is the maximum positive
Doppler bin difference between any evaluation point and the
pixel center. */
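		/* Worked example (a sketch, not in the original source): for
		 * nsinc2 = 2, dopdiff_bl = -(xincr + yincr)/2, and the four
		 * evaluation points sit at (bottom-left) + {0, xincr} x {0, yincr}
		 * Doppler bins, symmetric about the pixel center. */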
afdop_nsinc2_sq = dpar->nsinc2 * dpar->nsinc2;
xyincr[frm].x = axay[frm].x / dpar->nsinc2;
xyincr[frm].y = axay[frm].y / dpar->nsinc2;
dop[frm].w = -(dpar->nsinc2 - 1)*(xyincr[frm].x + xyincr[frm].y)/2;
		dop[frm].x = (dpar->nsinc2 - 1)*(fabs(xyincr[frm].x) + fabs(xyincr[frm].y))/2;
if (2*dop[frm].x + dpar->sinc2width + 1 > MAXBINS) {
afdop_badradar = 1;
frame[frm]->badradar_logfactor += log((2*dop[frm].x + dpar->sinc2width + 1) / MAXBINS);
if (dpar->warn_badradar) {
printf("\nWARNING in pos2doppler.c for set %2d frame %2d:\n", set, frm);
printf(" sinc^2 function evaluated at %d Doppler bins, must be <= %d\n",
(int) ceil(2*dop[frm].x + dpar->sinc2width + 1), MAXBINS);
}
}
/* Get the COM Doppler bin, corrected for ephemeris drift
and adjusted for orbital motion */
dopshift[frm] = frame[frm]->dopcom_vig + frame[frm]->view[v].dopoff + orbit_dopoff;
}
}
__global__ void pos2doppler_get_global_frmsz_krnl(int *global_lim, int4 *xylim,
int nframes) {
/* nframes-threaded kernel */
	int f = threadIdx.x;
	/* Let thread 0 initialize global_lim, then synchronize so the
	 * initialization can never race with the atomic updates below */
	if (f == 0)
		for (int i=0; i<4; i++)
			global_lim[i] = 0;
	__syncthreads();
	if (f < nframes) {
		/* Take the minimum of the lower limits and the maximum of the
		 * upper limits over all frames */
		atomicMin(&global_lim[0], xylim[f].w);
		atomicMax(&global_lim[1], xylim[f].x);
		atomicMin(&global_lim[2], xylim[f].y);
		atomicMax(&global_lim[3], xylim[f].z);
	}
}
__global__ void pos2doppler_pixel_af_krnl(
struct par_t *dpar,
struct mod_t *dmod,
struct dat_t *ddat,
struct pos_t **pos,
struct dopfrm_t **frame,
int xspan, int set, int nframes, int frame_size, int total_size, int body,
double orbit_xoff, double orbit_yoff,
float2 *axay, float2 *doplim, float2 *xyincr, float4 *dop,
int *ndop, int *idop0, int *global_lim,
float *dopshift) {
/* Multi-threaded kernel */
int total_offset = blockIdx.x * blockDim.x + threadIdx.x;
int offset = total_offset % frame_size;
int frm = total_offset / frame_size;
int x = offset % xspan + global_lim[0]; // pos[frm]->xlim[0];
int y = offset / xspan + global_lim[2]; // pos[frm]->ylim[0];
	int n;
	int idop, idop_min, idop_max, idop1, idop2, i, j, c, f, k, zaddr;
	/* in_bounds must be per-thread: the file-scope afdop_in_bounds would
	 * race between POS pixels processed concurrently */
	int in_bounds;
	double amp, arg_left, sinc2arg, sinc2_mean, arg_bl, fit_contribution,
			sumweights, dop_contribution[MAXBINS], dopPOS;
if ((offset < frame_size) && (frm < nframes)) {
n = pos[frm]->n;
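		/* The POS image is a (2n+1) x (2n+1) grid with pixel indices running
		 * from -n to n; zaddr linearizes (x,y) into the row-major cose_s
		 * buffer */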
zaddr = (y+n)*(2*n+1) + (x+n);
/* Loop through all POS pixels within the rectangular plane-of-sky
* region spanned by the model; for each such pixel which isn't blank
* sky, compute the cross-section contributions to pixels in the model
* Doppler spectrum. Note that functions posclr and posvis flag
* blank-sky pixels by assigning "cose" = cos(scattering angle) = 0.
* Only compute contributions from POS pixels that project onto the
* right body, in case this is the "orbit" action (for which this
* routine is called twice, once for each of the 2 orbiting bodies).*/
if (pos[frm]->cose_s[zaddr] > 0.0 && pos[frm]->body[x][y] == body) {
/* Get the fp Doppler bin of POS pixel center: dopPOS. Also get the
* min and max int Doppler bins to which this pixel contributes
* power: idop_min and idop_max. Each POS pixel contributes power
* to *all* Doppler bins, but here we're zeroing out the sinc^2
* response function beyond the nearest sinc2width bins.
* Actually, if nsinc2 > 1, we'll distribute power to *at least*
* sinc2width Doppler bins: For pixels which span multiple bins
* we'll err on the side of computing more contributions rather
* than fewer. */
/* dop.w - dopdiff_bl
* dop.x - dopdiff_max
* dop.y - dopDC_vig
* dop.z - dop_extra */
dopPOS = axay[frm].x*(x - orbit_xoff) + axay[frm].y*(y - orbit_yoff) + dopshift[frm];
idop_min = (int) floor(dopPOS - dop[frm].x + 1 - dpar->sinc2width/2.0);
idop_max = (int) floor(dopPOS + dop[frm].x + dpar->sinc2width/2.0);
/* Update the rectangular delay-Doppler region with nonzero power
* according to the model */
atomicMin(&frame[frm]->idoplim[0], idop_min);
atomicMax(&frame[frm]->idoplim[1], idop_max);
/* Update model's fp Doppler limits, as determined prior to
* convolution with the Doppler response function. At this point
* in the code, doplim is a pair of floating-point bin numbers
* which applies to POS pixel centers; when the loop over POS
* pixels is finished we will convert these limits to Hz and will
* widen the limits to account for nonzero POS pixel width. */
/* Note that p2d_doplim[2] is a single-precision (float) copy of
* the original p2d_frame->doplim[2] (double-precision). This is
* necessary to get atomic operations to work. */
atomicMinf(&doplim[frm].x, dopPOS);
atomicMaxf(&doplim[frm].y, dopPOS);
/* Check if all Doppler bins which will receive power from this POS
* pixel fall within the data frame; if not, initialize the
* "overflow" spectrum if necessary. */
			if ( (idop_min >= 1) && (idop_max <= ndop[frm]) )
				in_bounds = 1;
			else {
				in_bounds = 0;
if (!afdop_any_overflow) {
afdop_any_overflow = 1;
for (j=0; j<MAXOVERFLOW; j++)
frame[frm]->fit_overflow[j] = 0.0; // To-Do: This might need attention.
/* Center the COM in the overflow spectrum: bin [idop] in
* the fit frame corresponds to bin [idop+idop0] in the
* fit_overflow frame. */
idop0[frm] = MAXOVERFLOW/2 - (int) floor(dopshift[frm] + 0.5);
}
}
/* Compute the sinc^2 factors for Doppler mismatching: Take the
* mean of nsinc2^2 points interior to the POS pixel. Do the two
* most common cases (nsinc2 = 1 or 2) without loops to gain speed.
* Note the SINC2 macro multiplies its argument by pi. Then add the
* cross-section contributions to the model spectrum. */
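			/* SINC2(x) is assumed to evaluate sinc^2(pi*x) =
			 * (sin(pi*x)/(pi*x))^2, the bin's power response; its
			 * definition lives in the shape headers, not in this file. */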
for (idop=idop_min; idop<=idop_max; idop++) {
switch (dpar->nsinc2) {
case 1:
sinc2_mean = SINC2(dopPOS - idop);
break;
case 2:
arg_bl = dopPOS + dop[frm].w - idop; /* bl = bottom left */
sinc2_mean = (SINC2(arg_bl) +
SINC2(arg_bl+xyincr[frm].x) +
SINC2(arg_bl+xyincr[frm].y) +
SINC2(arg_bl+xyincr[frm].x+xyincr[frm].y)) / 4;
break;
default:
arg_left = dopPOS + dop[frm].w - idop;
sinc2_mean = 0.0;
for (i=0; i<dpar->nsinc2; i++) {
sinc2arg = arg_left;
for (j=0; j<dpar->nsinc2; j++) {
sinc2_mean += SINC2(sinc2arg);
sinc2arg += xyincr[frm].x;
}
arg_left += xyincr[frm].y;
}
sinc2_mean /= afdop_nsinc2_sq;
break;
}
k = MIN( idop - idop_min, MAXBINS);
dop_contribution[k] = sinc2_mean;
}
/* Compute the sum of Doppler weighting factors */
sumweights = 0.0;
for (idop=idop_min; idop<=idop_max; idop++) {
k = MIN( idop - idop_min, MAXBINS);
sumweights += dop_contribution[k];
}
/* The radar cross section within this plane-of-sky pixel is
* [differential radar scattering law]*[POS pixel area in km^2].
* The differential radar scattering law (function radlaw
* = d[cross section]/d[area] ) includes a sec(theta) factor to
* account for the fact that the POS pixel area is projected area
* rather than physical area on the target surface. */
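			/* Dividing by sumweights below renormalizes the sinc^2 weights
			 * so this pixel's contributions sum to exactly the cross
			 * section present on the sky in the pixel (see the 2005 July 25
			 * note in the file header). */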
amp = dev_radlaw(&dmod->photo, ddat->set[set].desc.doppler.iradlaw,
pos[frm]->cose_s[zaddr], pos[frm]->comp[x][y], pos[frm]->f[x][y])
* pos[frm]->km_per_pixel * pos[frm]->km_per_pixel / sumweights;
/* Only add POS pixel's power contributions to model Doppler spect-
* rum if NONE of those contributions fall outside spectrum limits*/
			if (in_bounds) {
/* Add the cross-section contributions to the model frame */
for (idop=idop_min; idop<=idop_max; idop++) {
k = MIN( idop - idop_min, MAXBINS);
fit_contribution = amp * dop_contribution[k];
atomicAdd(&ddat->set[set].desc.doppler.frame[frm].fit_s[idop],
fit_contribution);
if (dpar->action == MAP) {
if (dpar->map_mode == MAPMODE_DELDOP) {
if (frame[frm]->map_fit[idop] > 0.0) {
frame[frm]->map_pos[x][y] += fit_contribution;
c = pos[frm]->comp[x][y];
f = pos[frm]->f[x][y];
frame[frm]->map_facet_power[c][f] += fit_contribution;
if (dpar->map_verbose)
printf("# POS (%3d, %3d) comp %d facet %4d contributes %e to Dop (%3d)\n",
x+n, y+n, c, f, fit_contribution, idop-1);
}
} else if (dpar->map_mode == MAPMODE_POS) {
if (frame[frm]->map_pos[x][y] > 0.0) {
frame[frm]->map_fit[idop] += fit_contribution;
c = pos[frm]->comp[x][y];
f = pos[frm]->f[x][y];
frame[frm]->map_facet_power[c][f] += fit_contribution;
if (dpar->map_verbose)
printf("# POS (%3d, %3d) comp %d facet %4d contributes %e to Dop (%3d)\n",
x+n, y+n, c, f, fit_contribution, idop-1);
}
} else {
if (frame[frm]->map_pos[x][y] > 0.0) {
frame[frm]->map_fit[idop] += fit_contribution;
if (dpar->map_verbose) {
c = pos[frm]->comp[x][y];
f = pos[frm]->f[x][y];
printf("# POS (%3d, %3d) comp %d facet %4d contributes %e to Dop (%3d)\n",
x+n, y+n, c, f, fit_contribution, idop-1);
}
}
}
}
}
} else {
/* Add the cross-section contributions to the "overflow" spectrum */
if (dpar->action == MAP && dpar->map_mode != MAPMODE_DELDOP)
if (frame[frm]->map_pos[x][y] > 0.0)
dpar->map_overflow = 1;
idop1 = MAX( idop_min, -idop0[frm]);
idop2 = MIN( idop_max, -idop0[frm] + MAXOVERFLOW - 1);
for (idop=idop1; idop<=idop2; idop++) {
k = MIN( idop - idop_min, MAXBINS);
fit_contribution = amp * dop_contribution[k];
frame[frm]->fit_overflow[idop+idop0[frm]] += fit_contribution; // might need atomics
if (dpar->action == MAP && dpar->map_mode == MAPMODE_DELDOP)
if (idop >= dpar->map_doplim[0] && idop <= dpar->map_doplim[1]) {
frame[frm]->map_pos[x][y] += fit_contribution;
c = pos[frm]->comp[x][y];
f = pos[frm]->f[x][y];
frame[frm]->map_facet_power[c][f] += fit_contribution;
if (dpar->map_verbose)
printf("# POS (%3d, %3d) comp %d facet %4d contributes %e to Dop (%3d)\n",
x+n, y+n, c, f, fit_contribution, idop-1);
}
}
}
} /* if cos(scattering angle) > 0 and POS pixel projects onto the right body */
}
}
__global__ void pos2doppler_finish_krnl(struct par_t *dpar, struct dat_t *ddat,
struct dopfrm_t **frame, float4 *dop, float2 *doplim, float *dopshift,
int *idop0, int *ndop, int set, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	int j, j1, j2;
	double lookfact, sdev_sq, variance, dopfactor;
	if (frm < nframes) {
/* Copy float device variable over to the frame->doplim */
frame[frm]->doplim[0] = doplim[frm].x;
frame[frm]->doplim[1] = doplim[frm].y;
/* Convert model's Doppler limits from float bin numbers to Hz and
* widen the limits to account for nonzero POS pixel width */
frame[frm]->doplim[0] = (frame[frm]->doplim[0] - dopshift[frm])*
ddat->set[set].desc.doppler.dop_per_bin - dop[frm].z;
frame[frm]->doplim[1] = (frame[frm]->doplim[1] - dopshift[frm])*
ddat->set[set].desc.doppler.dop_per_bin + dop[frm].z;
/* Calculate overflow contributions to chi squared:
* o2 = obs^2 contribution, m2 = model^2 contribution.
* Also compute summed cross section and mean Doppler bin for overflow
* region, for use with the "delcorinit" action */
frame[frm]->overflow_o2 = 0.0;
frame[frm]->overflow_m2 = 0.0;
frame[frm]->overflow_xsec = 0.0;
frame[frm]->overflow_dopmean = 0.0;
sdev_sq = frame[frm]->sdev*frame[frm]->sdev;
variance = sdev_sq;
lookfact = (frame[frm]->nlooks > 0.0) ? 1.0/frame[frm]->nlooks : 0.0;
if (afdop_any_overflow) {
j1 = MAX(frame[frm]->idoplim[0] + idop0[frm], 0);
j2 = MIN(frame[frm]->idoplim[1] + idop0[frm], MAXOVERFLOW - 1);
for (j=j1; j<=j2; j++) {
if (frame[frm]->fit_overflow[j] != 0.0) {
if (dpar->speckle)
variance = sdev_sq + lookfact*frame[frm]->fit_overflow[j]*
frame[frm]->fit_overflow[j];
frame[frm]->overflow_o2 += 1.0;
frame[frm]->overflow_m2 += frame[frm]->fit_overflow[j]*
frame[frm]->fit_overflow[j]/variance;
frame[frm]->overflow_xsec += frame[frm]->fit_overflow[j];
frame[frm]->overflow_dopmean += (j - idop0[frm])*frame[frm]->fit_overflow[j];
}
}
if (frame[frm]->overflow_xsec != 0.0)
frame[frm]->overflow_dopmean /= frame[frm]->overflow_xsec;
/* Print a warning if the model extends even beyond the overflow spectrum */
if ( ((frame[frm]->idoplim[0] + idop0[frm]) < 0) ||
((frame[frm]->idoplim[1] + idop0[frm]) >= MAXOVERFLOW) ) {
afdop_badradar = 1;
dopfactor = (MAX(frame[frm]->idoplim[1] + idop0[frm], MAXOVERFLOW)
- MIN(frame[frm]->idoplim[0] + idop0[frm], 0) )
/ (1.0*MAXOVERFLOW);
frame[frm]->badradar_logfactor += log(dopfactor);
if (dpar->warn_badradar) {
printf("\nWARNING in pos2doppler.c for set %2d frame %2d:\n", set, frm);
printf(" model Doppler spectrum extends too far beyond the data spectrum\n");
printf(" data: bins %2d to %2d\n", 1, ndop[frm]);
printf(" model: bins %2d to %2d\n",
frame[frm]->idoplim[0], frame[frm]->idoplim[1]);
}
}
}
}
}
__host__ int pos2doppler_cuda_af( struct par_t *dpar, struct mod_t *dmod,
struct dat_t *ddat, double orbit_xoff, double orbit_yoff, double
orbit_dopoff, int body, int set, int nframes, int v)
{
int badradar, xspan, yspan, nThreads, frmsz, *global_lim, *idop0, *ndop;
dim3 BLK, THD;
struct dopfrm_t **frame;
struct pos_t **pos;
float *dopshift;
float2 *doplim, *axay, *xyincr;
float3 *w;
float4 *dop;
int4 *xylim;
cudaCalloc((void**)&frame, sizeof(struct dopfrm_t*),nframes);
cudaCalloc((void**)&pos, sizeof(struct pos_t*), nframes);
cudaCalloc((void**)&dopshift, sizeof(float), nframes);
cudaCalloc((void**)&axay, sizeof(float2), nframes);
cudaCalloc((void**)&xyincr, sizeof(float2), nframes);
cudaCalloc((void**)&doplim, sizeof(float2), nframes);
cudaCalloc((void**)&w, sizeof(float3), nframes);
cudaCalloc((void**)&dop, sizeof(float4), nframes);
cudaCalloc((void**)&xylim, sizeof(int4), nframes);
cudaCalloc((void**)&global_lim, sizeof(int), 4);
cudaCalloc((void**)&idop0, sizeof(int), nframes);
cudaCalloc((void**)&ndop, sizeof(int), nframes);
/* Launch nframes-threaded initialization kernel */
THD.x = nframes;
hipLaunchKernelGGL(( pos2doppler_init_af_krnl), dim3(1),dim3(THD), 0, 0, ddat, set, v, frame, pos, nframes,
ndop, idop0, w, doplim, xylim);
checkErrorAfterKernelLaunch("pos2doppler_init_af_krnl");
hipLaunchKernelGGL(( pos2doppler_radar_parameters_af_krnl), dim3(1),dim3(THD), 0, 0, dpar, ddat, frame, pos,
orbit_dopoff, set,nframes, v, axay, xyincr, w, dop, dopshift);
checkErrorAfterKernelLaunch("pos2doppler_radar_parameters_af_krnl");
/* Figure out the largest pos->xlim/ylim window for the entire set */
	hipLaunchKernelGGL(( pos2doppler_get_global_frmsz_krnl), dim3(1),dim3(THD), 0, 0, global_lim, xylim, nframes);
checkErrorAfterKernelLaunch("pos2doppler_get_global_frmsz_krnl");
deviceSyncAfterKernelLaunch("pos2doppler_get_global_frmsz_krnl");
/* Configure the pixel kernel */
xspan = global_lim[1] - global_lim[0] + 1; //xlim1 - xlim0 + 1;
yspan = global_lim[3] - global_lim[2] + 1; //ylim1 - ylim0 + 1;
frmsz = xspan * yspan;
nThreads = frmsz * nframes;
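	/* Ceiling division: enough blocks for one thread per POS pixel per frame */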
BLK.x = floor((maxThreadsPerBlock - 1 + nThreads) / maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
hipLaunchKernelGGL(( pos2doppler_pixel_af_krnl), dim3(BLK),dim3(THD), 0, 0, dpar, dmod, ddat, pos, frame, xspan,
set, nframes, frmsz, nThreads, body, orbit_xoff, orbit_yoff,
axay, doplim, xyincr, dop, ndop, idop0, global_lim, dopshift);
checkErrorAfterKernelLaunch("pos2doppler_pixel_af_krnl");
THD.x = nframes;
/* Launch the single-thread kernel to finish up Doppler calculations */
hipLaunchKernelGGL(( pos2doppler_finish_krnl), dim3(1),dim3(THD.x), 0, 0, dpar, ddat, frame, dop, doplim,
dopshift, idop0, ndop, set, nframes);
checkErrorAfterKernelLaunch("pos2doppler_finish_krnl, line ");
gpuErrchk(hipMemcpyFromSymbol(&badradar, afdop_badradar, sizeof(badradar),
0, hipMemcpyDeviceToHost));
int debug = 0;
if (debug)
dbg_print_fit(ddat, set, 3);
hipFree(frame);
hipFree(pos);
hipFree(dopshift);
hipFree(axay);
hipFree(xyincr);
hipFree(doplim);
hipFree(w);
hipFree(dop);
hipFree(xylim);
hipFree(global_lim);
hipFree(idop0);
return badradar;
}
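/* A minimal usage sketch (not from the original source; the caller and the
 * accumulator name are hypothetical). The file header notes that the return
 * flag and badradar_logfactor exist so the calling procedure can penalize
 * the objective function when the model overflows the Doppler spectrum:
 *
 *     int bad = pos2doppler_cuda_af(dpar, dmod, ddat, 0.0, 0.0, 0.0,
 *                                   body, set, nframes, v);
 *     if (bad)
 *         objective_logfactor += frame->badradar_logfactor;
 */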
| 5124365499cfff88aa65f7cc1a42dcb57a40c539.cu | /*****************************************************************************************
pos2doppler.c
Takes a pos_t structure and a doppler_t structure and "fills in" a dopfrm_t structure
indexed by frm. In other words, pos2doppler works from a model plane-of-sky image (with
an observer z-coordinate and a scattering angle assigned to each pixel) to produce a model
Doppler spectrum corresponding to data frame frm.
In the case of an orbiting binary system (the "orbit" action), pos2doppler only computes
power contributions from the orbiting body denoted by the "body" parameter: the routine is
called twice, once for each body.
pos2doppler takes contributions only from the rectangular plane-of-sky region defined by
pos.xlim and pos.ylim -- the smallest rectangle which completely "contains" the model in
the plane of the sky. No power is contributed by parts of the model which extend beyond
the POS window; this is why such models are heavily penalized (the objective function is
doubled -- see function f in file bestfit.c).
idoplim is updated for frame frm to show the model Doppler region that contains nonzero
power.
Modified 2016 November 14 by ME:
Implemented an all-GPU version of pos2doppler.
Modified 2015 June 3 by CM:
Implement smearing for the "fit" and "write" actions by adding "v" (view) parameter
and applying each run of pos2doppler to a single view rather than to an entire
(smeared) frame
Modified 2014 February 10 by CM:
Add "ilaw" argument to the radlaw routine to implement multiple radar scattering laws
Modified 2012 March 23 by CM:
Implement Doppler scaling
Modified 2010 September 1 by CM:
Add braces to an if-then-else statement to avoid compilation warning
Modified 2010 August 26 by CM:
For the "map" action, change the "map_forward" parameter to "map_mode"
and implement map_mode = 'facets'
For the "map" action, implement the "map_verbose" parameter
Modified 2010 June 15 by CM:
Pass the entire par_t parameter structure as an argument rather than
just selected parameters
Implement the map action
Modified 2009 November 15 by CM:
Fix argument type in a printf statement
Modified 2009 April 3 by CM:
Add the "warn_badradar" parameter (see below)
If the sinc^2 Doppler response function extends to too many Doppler
bins, set a flag and compute a factor by which the objective
function should be increased (actually the logarithm of this
factor). If the "warn_badradar" parameter is turned on, print an
explicit warning.
If the model is too wide in Doppler space even for the overflow image,
set a flag and compute a factor by which the objective function
should be increased (actually the logarithm of this factor). If
the "warn_badradar" parameter is turned on, print an explicit
warning.
Make pos2doppler int rather than void in order to return the flag
described above to the calling procedure
Modified 2007 August 4 by CM:
Add orbit_xoff, orbit_yoff, and orbit_dopoff parameters, the x offset
(POS image rows), y offset (POS image columns), and Doppler offset
(spectral bins) of the center of mass due to orbital motion.
Add body parameter to indicate (for the "orbit" action) which of the two
orbiting bodies' power contributions should be computed
Add c (component) argument to radlaw routine
Modified 2006 September 14 by CM:
If the overflow region is too small, print a warning rather than
halting the program
Modified 2006 June 21 by CM:
Change dopres to dop_per_bin
For POS renderings, change res to km_per_pixel
Modified 2006 June 18 by CM:
Allow each Doppler frame in a dataset to have different dimensions
after vignetting
Modified 2006 March 10 by CM:
Pass the "speckle" parameter so that self-noise can be included when
computing the chi squared contribution of the overflow region
Compute overflow_xsec and overflow_dopmean so that these quantities
can be used by the "delcorinit" action
Modified 2005 July 25 by CM:
Fix bug in overall cross-section scale factor: return to Scott's scheme
of normalizing the cross-section contributions from a given POS
pixel so that they sum to the cross section actually present on the
sky in that pixel
Modified 2005 July 20 by CM:
Fix bug in computing floating-point Doppler limits in Hz
Add "facet" argument to radlaw routine
Modified 2005 July 5 by CM:
Eliminate "dir" argument (since we always add power to the model image
and never subtract it)
Add "set" (set number) argument in order to improve error messages
Modified 2005 June 27 by CM:
Rename INFINITY constant to HUGENUMBER to avoid conflicts
Modified 2005 June 25 by CM:
Rename old "doplim" to "idoplim"; this is the Doppler limits in
(integer) bin numbers
Add new "doplim" which is the floating-point Doppler limits in Hz,
obtained PRIOR to convolution with the Doppler response function
Modified 2005 January 25 by CM:
Take care of uninitialized variable
Modified 2003 May 11 by CM:
Compute contributions to chi squared by model power which lies
outside the limits of the data frame.
Modified 2003 May 5 by CM:
For each POS pixel, compute the entire pixel's contribution to a
given Doppler bin in the model spectrum so long as even one point
at which we evaluate the sinc^2 response function is less than
sinc2width/2.0 bins away from that Doppler bin. In other words,
err on the side of computing too many small contributions to each
bin in the model spectrum, so as not to omit significant contributions
just because a POS pixel's *center* isn't close enough in Doppler.
Modified 2003 April 29 by CM:
Evaluate the sinc^2 Doppler response function at nsinc2 points
per POS pixel dimension, not just at the pixel center.
The sinc^2 function varies rapidly -- one null per Doppler bin
away from the central peak -- so if the pixel width is more than
about half the Doppler resolution, we want to take the mean of
several points within the pixel.
Modified 2003 April 26 by CM:
Zero out the sinc^2 Doppler response function beyond the
nearest sinc2width bins rather than beyond the nearest 2 bins
Modified 2003 April 17 by CM:
Now correctly scales the model Doppler spectrum to account for
Doppler mismatching
*****************************************************************************************/
extern "C" {
#include "head.h"
}
/* Declare __device__ vars and structs, which have file scope */
__device__ int afdop_nsinc2_sq, afdop_any_overflow, afdop_in_bounds,
afdop_badradar;
/* Note that both pos2deldop_cuda.cu and posvis_cuda.cu have the atomicMaxf
* and atomicMinf device functions defined separately. This is done due to
 * the way static device functions are handled, I guess. I tried putting them
 * into a separate file with a declaration in the shape-cuda.h header file,
* but to no avail. So here they are, duplicated in both files. */
__device__ static float atomicMaxf(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMinf(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__global__ void pos2doppler_init_af_krnl(struct dat_t *ddat, int set, int v,
struct dopfrm_t **frame, struct pos_t **pos, int nframes, int *ndop,
int *idop0, float3 *w, float2 *doplim, int4 *xylim) {
/* nframes-threaded kernel */
int frm = threadIdx.x;
if (frm < nframes) {
/* Initialize variables */
idop0[frm] = 0;
afdop_any_overflow = 0;
frame[frm] = &ddat->set[set].desc.doppler.frame[frm];
pos[frm] = &frame[frm]->pos;
ndop[frm] = frame[frm]->ndop;
frame[frm]->idoplim[0] = ndop[frm] + 999999;
frame[frm]->idoplim[1] = -999999;
frame[frm]->doplim[0] = HUGENUMBER;
frame[frm]->doplim[1] = -HUGENUMBER;
afdop_badradar = 0;
frame[frm]->badradar_logfactor = 0.0;
/* Get w, the apparent spin vector in observer coordinates */
dev_cotrans4(w, frame[frm]->view[v].oe, frame[frm]->view[v].spin, 1, frm);
/* Copy frame->doplim over to the float device variable */
doplim[frm].x = frame[frm]->doplim[0];
doplim[frm].y = frame[frm]->doplim[1];
/* Now get pos->xlim[0], pos->xlim[1], pos->ylim[0], pos->ylim[1] */
xylim[frm].w = pos[frm]->xlim[0];
xylim[frm].x = pos[frm]->xlim[1];
xylim[frm].y = pos[frm]->ylim[0];
xylim[frm].z = pos[frm]->ylim[1];
}
}
__global__ void pos2doppler_radar_parameters_af_krnl(struct par_t *dpar,
struct dat_t *ddat, struct dopfrm_t **frame, struct pos_t **pos,
double orbit_dopoff, int set, int nframes, int v, float2 *axay,
float2 *xyincr, float3 *w, float4 *dop, float *dopshift) {
/* nframes-threaded kernel */
int frm = threadIdx.x;
double dopfact;
if (frm < nframes) {
/* Compute the Doppler bin increment per plane-of-sky pixel westward (ax)
and northward (ay); these values are scaled by the "dopscale" parameter
for this dataset. Then compute km2Hz, the Doppler increment (Hz) per
km perpendicular to the projected spin axis in the plane of the sky. */
dopfact = ddat->set[set].desc.doppler.dopscale.val * KM2HZFACT * pos[0]->km_per_pixel
* ddat->set[set].desc.doppler.Ftx / ddat->set[set].desc.doppler.dop_per_bin;
axay[frm].x = -w[frm].y*dopfact;
axay[frm].y = w[frm].x*dopfact;
frame[frm]->view[v].km2Hz = sqrt(axay[frm].x*axay[frm].x +
axay[frm].y*axay[frm].y) * ddat->set[set].desc.doppler.dop_per_bin
/ pos[frm]->km_per_pixel;
/* Compute absolute value of the difference between maximum (or minimum)
* Doppler on any given POS pixel's edge and the Doppler at its center */
/* dop.w - dopdiff_bl
* dop.x - dopdiff_max
* dop.y - dopDC_vig
* dop.z - dop_extra */
if (w[frm].x != 0.0 || w[frm].y != 0.0)
dop[frm].z = frame[frm]->view[v].km2Hz * 0.5 * pos[frm]->km_per_pixel
* sqrt(w[frm].x*w[frm].x + w[frm].y*w[frm].y) /
MAX( fabs(w[frm].x), fabs(w[frm].y));
else
dop[frm].z = 0.0;
/* We may be evaluating the sinc^2 Doppler response function at
more than one point per POS pixel. xincr and yincr are the
Doppler bin increments between adjacent evaluation points in the
x and y directions. dopdiff_bl is the Doppler bin difference
between the bottom-leftmost (southeasternmost) evaluation point
and the pixel center. dopdiff_max is the maximum positive
Doppler bin difference between any evaluation point and the
pixel center. */
afdop_nsinc2_sq = dpar->nsinc2 * dpar->nsinc2;
xyincr[frm].x = axay[frm].x / dpar->nsinc2;
xyincr[frm].y = axay[frm].y / dpar->nsinc2;
dop[frm].w = -(dpar->nsinc2 - 1)*(xyincr[frm].x + xyincr[frm].y)/2;
		dop[frm].x = (dpar->nsinc2 - 1)*(fabs(xyincr[frm].x) + fabs(xyincr[frm].y))/2;
if (2*dop[frm].x + dpar->sinc2width + 1 > MAXBINS) {
afdop_badradar = 1;
frame[frm]->badradar_logfactor += log((2*dop[frm].x + dpar->sinc2width + 1) / MAXBINS);
if (dpar->warn_badradar) {
printf("\nWARNING in pos2doppler.c for set %2d frame %2d:\n", set, frm);
printf(" sinc^2 function evaluated at %d Doppler bins, must be <= %d\n",
(int) ceil(2*dop[frm].x + dpar->sinc2width + 1), MAXBINS);
}
}
/* Get the COM Doppler bin, corrected for ephemeris drift
and adjusted for orbital motion */
dopshift[frm] = frame[frm]->dopcom_vig + frame[frm]->view[v].dopoff + orbit_dopoff;
}
}
__global__ void pos2doppler_get_global_frmsz_krnl(int *global_lim, int4 *xylim,
int nframes) {
/* nframes-threaded kernel */
	int f = threadIdx.x;
	/* Let thread 0 initialize global_lim, then synchronize so the
	 * initialization can never race with the atomic updates below */
	if (f == 0)
		for (int i=0; i<4; i++)
			global_lim[i] = 0;
	__syncthreads();
	if (f < nframes) {
		/* Take the minimum of the lower limits and the maximum of the
		 * upper limits over all frames */
		atomicMin(&global_lim[0], xylim[f].w);
		atomicMax(&global_lim[1], xylim[f].x);
		atomicMin(&global_lim[2], xylim[f].y);
		atomicMax(&global_lim[3], xylim[f].z);
	}
}
__global__ void pos2doppler_pixel_af_krnl(
struct par_t *dpar,
struct mod_t *dmod,
struct dat_t *ddat,
struct pos_t **pos,
struct dopfrm_t **frame,
int xspan, int set, int nframes, int frame_size, int total_size, int body,
double orbit_xoff, double orbit_yoff,
float2 *axay, float2 *doplim, float2 *xyincr, float4 *dop,
int *ndop, int *idop0, int *global_lim,
float *dopshift) {
/* Multi-threaded kernel */
int total_offset = blockIdx.x * blockDim.x + threadIdx.x;
int offset = total_offset % frame_size;
int frm = total_offset / frame_size;
int x = offset % xspan + global_lim[0]; // pos[frm]->xlim[0];
int y = offset / xspan + global_lim[2]; // pos[frm]->ylim[0];
	int n;
	int idop, idop_min, idop_max, idop1, idop2, i, j, c, f, k, zaddr;
	/* in_bounds must be per-thread: the file-scope afdop_in_bounds would
	 * race between POS pixels processed concurrently */
	int in_bounds;
	double amp, arg_left, sinc2arg, sinc2_mean, arg_bl, fit_contribution,
			sumweights, dop_contribution[MAXBINS], dopPOS;
if ((offset < frame_size) && (frm < nframes)) {
n = pos[frm]->n;
zaddr = (y+n)*(2*n+1) + (x+n);
/* Loop through all POS pixels within the rectangular plane-of-sky
* region spanned by the model; for each such pixel which isn't blank
* sky, compute the cross-section contributions to pixels in the model
* Doppler spectrum. Note that functions posclr and posvis flag
* blank-sky pixels by assigning "cose" = cos(scattering angle) = 0.
* Only compute contributions from POS pixels that project onto the
* right body, in case this is the "orbit" action (for which this
* routine is called twice, once for each of the 2 orbiting bodies).*/
if (pos[frm]->cose_s[zaddr] > 0.0 && pos[frm]->body[x][y] == body) {
/* Get the fp Doppler bin of POS pixel center: dopPOS. Also get the
* min and max int Doppler bins to which this pixel contributes
* power: idop_min and idop_max. Each POS pixel contributes power
* to *all* Doppler bins, but here we're zeroing out the sinc^2
* response function beyond the nearest sinc2width bins.
* Actually, if nsinc2 > 1, we'll distribute power to *at least*
* sinc2width Doppler bins: For pixels which span multiple bins
* we'll err on the side of computing more contributions rather
* than fewer. */
/* dop.w - dopdiff_bl
* dop.x - dopdiff_max
* dop.y - dopDC_vig
* dop.z - dop_extra */
dopPOS = axay[frm].x*(x - orbit_xoff) + axay[frm].y*(y - orbit_yoff) + dopshift[frm];
idop_min = (int) floor(dopPOS - dop[frm].x + 1 - dpar->sinc2width/2.0);
idop_max = (int) floor(dopPOS + dop[frm].x + dpar->sinc2width/2.0);
/* Update the rectangular delay-Doppler region with nonzero power
* according to the model */
atomicMin(&frame[frm]->idoplim[0], idop_min);
atomicMax(&frame[frm]->idoplim[1], idop_max);
/* Update model's fp Doppler limits, as determined prior to
* convolution with the Doppler response function. At this point
* in the code, doplim is a pair of floating-point bin numbers
* which applies to POS pixel centers; when the loop over POS
* pixels is finished we will convert these limits to Hz and will
* widen the limits to account for nonzero POS pixel width. */
/* Note that p2d_doplim[2] is a single-precision (float) copy of
* the original p2d_frame->doplim[2] (double-precision). This is
* necessary to get atomic operations to work. */
atomicMinf(&doplim[frm].x, dopPOS);
atomicMaxf(&doplim[frm].y, dopPOS);
/* Check if all Doppler bins which will receive power from this POS
* pixel fall within the data frame; if not, initialize the
* "overflow" spectrum if necessary. */
			if ( (idop_min >= 1) && (idop_max <= ndop[frm]) )
				in_bounds = 1;
			else {
				in_bounds = 0;
if (!afdop_any_overflow) {
afdop_any_overflow = 1;
for (j=0; j<MAXOVERFLOW; j++)
frame[frm]->fit_overflow[j] = 0.0; // To-Do: This might need attention.
/* Center the COM in the overflow spectrum: bin [idop] in
* the fit frame corresponds to bin [idop+idop0] in the
* fit_overflow frame. */
idop0[frm] = MAXOVERFLOW/2 - (int) floor(dopshift[frm] + 0.5);
}
}
/* Compute the sinc^2 factors for Doppler mismatching: Take the
* mean of nsinc2^2 points interior to the POS pixel. Do the two
* most common cases (nsinc2 = 1 or 2) without loops to gain speed.
* Note the SINC2 macro multiplies its argument by pi. Then add the
* cross-section contributions to the model spectrum. */
for (idop=idop_min; idop<=idop_max; idop++) {
switch (dpar->nsinc2) {
case 1:
sinc2_mean = SINC2(dopPOS - idop);
break;
case 2:
arg_bl = dopPOS + dop[frm].w - idop; /* bl = bottom left */
sinc2_mean = (SINC2(arg_bl) +
SINC2(arg_bl+xyincr[frm].x) +
SINC2(arg_bl+xyincr[frm].y) +
SINC2(arg_bl+xyincr[frm].x+xyincr[frm].y)) / 4;
break;
default:
arg_left = dopPOS + dop[frm].w - idop;
sinc2_mean = 0.0;
for (i=0; i<dpar->nsinc2; i++) {
sinc2arg = arg_left;
for (j=0; j<dpar->nsinc2; j++) {
sinc2_mean += SINC2(sinc2arg);
sinc2arg += xyincr[frm].x;
}
arg_left += xyincr[frm].y;
}
sinc2_mean /= afdop_nsinc2_sq;
break;
}
k = MIN( idop - idop_min, MAXBINS);
dop_contribution[k] = sinc2_mean;
}
/* Compute the sum of Doppler weighting factors */
sumweights = 0.0;
for (idop=idop_min; idop<=idop_max; idop++) {
k = MIN( idop - idop_min, MAXBINS);
sumweights += dop_contribution[k];
}
/* The radar cross section within this plane-of-sky pixel is
* [differential radar scattering law]*[POS pixel area in km^2].
* The differential radar scattering law (function radlaw
* = d[cross section]/d[area] ) includes a sec(theta) factor to
* account for the fact that the POS pixel area is projected area
* rather than physical area on the target surface. */
amp = dev_radlaw(&dmod->photo, ddat->set[set].desc.doppler.iradlaw,
pos[frm]->cose_s[zaddr], pos[frm]->comp[x][y], pos[frm]->f[x][y])
* pos[frm]->km_per_pixel * pos[frm]->km_per_pixel / sumweights;
/* Only add POS pixel's power contributions to model Doppler spect-
* rum if NONE of those contributions fall outside spectrum limits*/
			if (in_bounds) {
/* Add the cross-section contributions to the model frame */
for (idop=idop_min; idop<=idop_max; idop++) {
k = MIN( idop - idop_min, MAXBINS);
fit_contribution = amp * dop_contribution[k];
atomicAdd(&ddat->set[set].desc.doppler.frame[frm].fit_s[idop],
fit_contribution);
if (dpar->action == MAP) {
if (dpar->map_mode == MAPMODE_DELDOP) {
if (frame[frm]->map_fit[idop] > 0.0) {
frame[frm]->map_pos[x][y] += fit_contribution;
c = pos[frm]->comp[x][y];
f = pos[frm]->f[x][y];
frame[frm]->map_facet_power[c][f] += fit_contribution;
if (dpar->map_verbose)
printf("# POS (%3d, %3d) comp %d facet %4d contributes %e to Dop (%3d)\n",
x+n, y+n, c, f, fit_contribution, idop-1);
}
} else if (dpar->map_mode == MAPMODE_POS) {
if (frame[frm]->map_pos[x][y] > 0.0) {
frame[frm]->map_fit[idop] += fit_contribution;
c = pos[frm]->comp[x][y];
f = pos[frm]->f[x][y];
frame[frm]->map_facet_power[c][f] += fit_contribution;
if (dpar->map_verbose)
printf("# POS (%3d, %3d) comp %d facet %4d contributes %e to Dop (%3d)\n",
x+n, y+n, c, f, fit_contribution, idop-1);
}
} else {
if (frame[frm]->map_pos[x][y] > 0.0) {
frame[frm]->map_fit[idop] += fit_contribution;
if (dpar->map_verbose) {
c = pos[frm]->comp[x][y];
f = pos[frm]->f[x][y];
printf("# POS (%3d, %3d) comp %d facet %4d contributes %e to Dop (%3d)\n",
x+n, y+n, c, f, fit_contribution, idop-1);
}
}
}
}
}
} else {
/* Add the cross-section contributions to the "overflow" spectrum */
if (dpar->action == MAP && dpar->map_mode != MAPMODE_DELDOP)
if (frame[frm]->map_pos[x][y] > 0.0)
dpar->map_overflow = 1;
idop1 = MAX( idop_min, -idop0[frm]);
idop2 = MIN( idop_max, -idop0[frm] + MAXOVERFLOW - 1);
for (idop=idop1; idop<=idop2; idop++) {
k = MIN( idop - idop_min, MAXBINS);
fit_contribution = amp * dop_contribution[k];
frame[frm]->fit_overflow[idop+idop0[frm]] += fit_contribution; // might need atomics
if (dpar->action == MAP && dpar->map_mode == MAPMODE_DELDOP)
if (idop >= dpar->map_doplim[0] && idop <= dpar->map_doplim[1]) {
frame[frm]->map_pos[x][y] += fit_contribution;
c = pos[frm]->comp[x][y];
f = pos[frm]->f[x][y];
frame[frm]->map_facet_power[c][f] += fit_contribution;
if (dpar->map_verbose)
printf("# POS (%3d, %3d) comp %d facet %4d contributes %e to Dop (%3d)\n",
x+n, y+n, c, f, fit_contribution, idop-1);
}
}
}
} /* if cos(scattering angle) > 0 and POS pixel projects onto the right body */
}
}
__global__ void pos2doppler_finish_krnl(struct par_t *dpar, struct dat_t *ddat,
struct dopfrm_t **frame, float4 *dop, float2 *doplim, float *dopshift,
int *idop0, int *ndop, int set, int nframes) {
	/* nframes-threaded kernel */
	int frm = threadIdx.x;
	int j, j1, j2;
	double lookfact, sdev_sq, variance, dopfactor;
	if (frm < nframes) {
/* Copy float device variable over to the frame->doplim */
frame[frm]->doplim[0] = doplim[frm].x;
frame[frm]->doplim[1] = doplim[frm].y;
/* Convert model's Doppler limits from float bin numbers to Hz and
* widen the limits to account for nonzero POS pixel width */
frame[frm]->doplim[0] = (frame[frm]->doplim[0] - dopshift[frm])*
ddat->set[set].desc.doppler.dop_per_bin - dop[frm].z;
frame[frm]->doplim[1] = (frame[frm]->doplim[1] - dopshift[frm])*
ddat->set[set].desc.doppler.dop_per_bin + dop[frm].z;
/* Calculate overflow contributions to chi squared:
* o2 = obs^2 contribution, m2 = model^2 contribution.
* Also compute summed cross section and mean Doppler bin for overflow
* region, for use with the "delcorinit" action */
frame[frm]->overflow_o2 = 0.0;
frame[frm]->overflow_m2 = 0.0;
frame[frm]->overflow_xsec = 0.0;
frame[frm]->overflow_dopmean = 0.0;
sdev_sq = frame[frm]->sdev*frame[frm]->sdev;
variance = sdev_sq;
lookfact = (frame[frm]->nlooks > 0.0) ? 1.0/frame[frm]->nlooks : 0.0;
if (afdop_any_overflow) {
j1 = MAX(frame[frm]->idoplim[0] + idop0[frm], 0);
j2 = MIN(frame[frm]->idoplim[1] + idop0[frm], MAXOVERFLOW - 1);
for (j=j1; j<=j2; j++) {
if (frame[frm]->fit_overflow[j] != 0.0) {
if (dpar->speckle)
variance = sdev_sq + lookfact*frame[frm]->fit_overflow[j]*
frame[frm]->fit_overflow[j];
frame[frm]->overflow_o2 += 1.0;
frame[frm]->overflow_m2 += frame[frm]->fit_overflow[j]*
frame[frm]->fit_overflow[j]/variance;
frame[frm]->overflow_xsec += frame[frm]->fit_overflow[j];
frame[frm]->overflow_dopmean += (j - idop0[frm])*frame[frm]->fit_overflow[j];
}
}
if (frame[frm]->overflow_xsec != 0.0)
frame[frm]->overflow_dopmean /= frame[frm]->overflow_xsec;
/* Print a warning if the model extends even beyond the overflow spectrum */
if ( ((frame[frm]->idoplim[0] + idop0[frm]) < 0) ||
((frame[frm]->idoplim[1] + idop0[frm]) >= MAXOVERFLOW) ) {
afdop_badradar = 1;
dopfactor = (MAX(frame[frm]->idoplim[1] + idop0[frm], MAXOVERFLOW)
- MIN(frame[frm]->idoplim[0] + idop0[frm], 0) )
/ (1.0*MAXOVERFLOW);
frame[frm]->badradar_logfactor += log(dopfactor);
if (dpar->warn_badradar) {
printf("\nWARNING in pos2doppler.c for set %2d frame %2d:\n", set, frm);
printf(" model Doppler spectrum extends too far beyond the data spectrum\n");
printf(" data: bins %2d to %2d\n", 1, ndop[frm]);
printf(" model: bins %2d to %2d\n",
frame[frm]->idoplim[0], frame[frm]->idoplim[1]);
}
}
}
}
}
__host__ int pos2doppler_cuda_af( struct par_t *dpar, struct mod_t *dmod,
struct dat_t *ddat, double orbit_xoff, double orbit_yoff, double
orbit_dopoff, int body, int set, int nframes, int v)
{
int badradar, xspan, yspan, nThreads, frmsz, *global_lim, *idop0, *ndop;
dim3 BLK, THD;
struct dopfrm_t **frame;
struct pos_t **pos;
float *dopshift;
float2 *doplim, *axay, *xyincr;
float3 *w;
float4 *dop;
int4 *xylim;
cudaCalloc((void**)&frame, sizeof(struct dopfrm_t*),nframes);
cudaCalloc((void**)&pos, sizeof(struct pos_t*), nframes);
cudaCalloc((void**)&dopshift, sizeof(float), nframes);
cudaCalloc((void**)&axay, sizeof(float2), nframes);
cudaCalloc((void**)&xyincr, sizeof(float2), nframes);
cudaCalloc((void**)&doplim, sizeof(float2), nframes);
cudaCalloc((void**)&w, sizeof(float3), nframes);
cudaCalloc((void**)&dop, sizeof(float4), nframes);
cudaCalloc((void**)&xylim, sizeof(int4), nframes);
cudaCalloc((void**)&global_lim, sizeof(int), 4);
cudaCalloc((void**)&idop0, sizeof(int), nframes);
cudaCalloc((void**)&ndop, sizeof(int), nframes);
/* Launch nframes-threaded initialization kernel */
THD.x = nframes;
pos2doppler_init_af_krnl<<<1,THD>>>(ddat, set, v, frame, pos, nframes,
ndop, idop0, w, doplim, xylim);
checkErrorAfterKernelLaunch("pos2doppler_init_af_krnl");
pos2doppler_radar_parameters_af_krnl<<<1,THD>>>(dpar, ddat, frame, pos,
orbit_dopoff, set,nframes, v, axay, xyincr, w, dop, dopshift);
checkErrorAfterKernelLaunch("pos2doppler_radar_parameters_af_krnl");
/* Figure out the largest pos->xlim/ylim window for the entire set */
	pos2doppler_get_global_frmsz_krnl<<<1,THD>>>(global_lim, xylim, nframes);
checkErrorAfterKernelLaunch("pos2doppler_get_global_frmsz_krnl");
deviceSyncAfterKernelLaunch("pos2doppler_get_global_frmsz_krnl");
/* Configure the pixel kernel */
xspan = global_lim[1] - global_lim[0] + 1; //xlim1 - xlim0 + 1;
yspan = global_lim[3] - global_lim[2] + 1; //ylim1 - ylim0 + 1;
frmsz = xspan * yspan;
nThreads = frmsz * nframes;
BLK.x = floor((maxThreadsPerBlock - 1 + nThreads) / maxThreadsPerBlock);
THD.x = maxThreadsPerBlock; // Thread block dimensions
pos2doppler_pixel_af_krnl<<<BLK,THD>>>(dpar, dmod, ddat, pos, frame, xspan,
set, nframes, frmsz, nThreads, body, orbit_xoff, orbit_yoff,
axay, doplim, xyincr, dop, ndop, idop0, global_lim, dopshift);
checkErrorAfterKernelLaunch("pos2doppler_pixel_af_krnl");
THD.x = nframes;
/* Launch the single-thread kernel to finish up Doppler calculations */
pos2doppler_finish_krnl<<<1,THD.x>>>(dpar, ddat, frame, dop, doplim,
dopshift, idop0, ndop, set, nframes);
checkErrorAfterKernelLaunch("pos2doppler_finish_krnl, line ");
gpuErrchk(cudaMemcpyFromSymbol(&badradar, afdop_badradar, sizeof(badradar),
0, cudaMemcpyDeviceToHost));
int debug = 0;
if (debug)
dbg_print_fit(ddat, set, 3);
cudaFree(frame);
cudaFree(pos);
cudaFree(dopshift);
cudaFree(axay);
cudaFree(xyincr);
cudaFree(doplim);
cudaFree(w);
cudaFree(dop);
cudaFree(xylim);
cudaFree(global_lim);
cudaFree(idop0);
return badradar;
}
|
57aa0530a44e0ca314952085241226a5d0f05e38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/hip/detail/KernelUtils.h>
#include <THH/THHNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_single_out_frame(
scalar_t* inputData,
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
if (oRow < output.size(2) && oColumn < output.size(3))
{
int tStart = oFrame * dT - pT;
int hStart = oRow * dH - pH;
int wStart = oColumn * dW - pW;
int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime);
int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight);
int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth);
while(tStart < 0)
tStart += dilationT;
while(hStart < 0)
hStart += dilationH;
while(wStart < 0)
wStart += dilationW;
int index = 0;
int maxIndex = -1;
inputData += slice * itime * iheight * iwidth;
scalar_t max = THCNumerics<scalar_t>::min();
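    // Scan the (possibly dilated) pooling window below; the isnan() branch
    // deliberately propagates NaNs so they win the max, matching CPU
    // max-pooling semantics.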
for (int t = tStart; t < tEnd; t += dilationT)
{
for (int h = hStart; h < hEnd; h += dilationH)
{
for (int w = wStart; w < wEnd; w += dilationW)
{
index = t * iheight * iwidth + h * iwidth + w;
scalar_t val = inputData[index];
if ((max < val) || THCNumerics<scalar_t>::isnan(val))
{
max = val;
maxIndex = index;
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
indices[slice][oFrame][oRow][oColumn] = maxIndex;
}
}
template <int KERNEL_WIDTH, typename scalar_t>
__global__ static void max_pool3d_with_indices_single_out_frame(
scalar_t* inputData,
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int kT, int kH,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
if (oRow < output.size(2) && oColumn < output.size(3))
{
int tStart = oFrame * dT - pT;
int hStart = oRow * dH - pH;
int wStart = oColumn * dW - pW;
int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime);
int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight);
int wEnd = min(wStart + (KERNEL_WIDTH - 1) * dilationW + 1, iwidth);
while(tStart < 0)
tStart += dilationT;
while(hStart < 0)
hStart += dilationH;
while(wStart < 0)
wStart += dilationW;
int index = 0;
int maxIndex = -1;
scalar_t max = THCNumerics<scalar_t>::min();
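    // Note: unlike the generic kernel above, this kW-specialized variant has
    // no isnan() check, so NaN inputs are not propagated here.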
for (int t = tStart; t < tEnd; t += dilationT)
{
for (int h = hStart; h < hEnd; h += dilationH)
{
for (int w = wStart; w < wEnd; w += dilationW)
{
index = t * iheight * iwidth + h * iwidth + w;
scalar_t val = inputData[slice * itime * iheight * iwidth + index];
if (max < val)
{
max = val;
maxIndex = index;
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
indices[slice][oFrame][oRow][oColumn] = maxIndex;
}
}
#define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame<KW>) \
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
input_data, \
output.packed_accessor<scalar_t, 4>(), \
indices.packed_accessor<int64_t, 4>(), \
itime, iheight, iwidth, \
kT, kH, \
dT, dH, dW, \
pT, pH, pW, \
dilationT, dilationH, dilationW, offsetZ); \
break
template <typename scalar_t>
void max_pool3d_with_indices_out_frame(
scalar_t* input_data,
const Tensor& output,
const Tensor& indices,
int totalZ,
int itime, int iheight, int iwidth,
int otime, int oheight, int owidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
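  // gridDim.z is capped at 65535, so the flattened slice*time dimension is
  // processed in chunks of at most 65535 frames, advancing offsetZ each pass.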
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
switch (kW) {
UPDATE_OUTPUT_KERNEL_WIDTH(1);
UPDATE_OUTPUT_KERNEL_WIDTH(2);
UPDATE_OUTPUT_KERNEL_WIDTH(3);
UPDATE_OUTPUT_KERNEL_WIDTH(4);
UPDATE_OUTPUT_KERNEL_WIDTH(5);
UPDATE_OUTPUT_KERNEL_WIDTH(6);
UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
hipLaunchKernelGGL(( max_pool3d_with_indices_single_out_frame)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_data,
output.packed_accessor<scalar_t, 4>(),
indices.packed_accessor<int64_t, 4>(),
itime, iheight, iwidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
}
    // hipGetLastError() also clears the error state, so cache the result
    // rather than calling it twice.
    const hipError_t err = hipGetLastError();
    TORCH_CHECK(err == hipSuccess,
      "max_pool3d_with_indices_out_frame failed with error code ",
      err);
totalZ -= 65535;
offsetZ += 65535;
}
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_backward_single_out_frame(
scalar_t *gradInputData,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature
if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3))
{
int maxIndex = indices[slice][oFrame][oRow][oColumn];
if (maxIndex != -1) {
atomicAdd(&gradInputData[slice * itime * iheight * iwidth + maxIndex],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
}
template <typename scalar_t>
void max_pool3d_with_indices_backward_out_frame(
scalar_t *gradInputData,
const Tensor& gradOutput,
const Tensor& indices,
int64_t totalZ,
int itime, int iheight, int iwidth,
int oheight, int owidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_pool3d_with_indices_backward_single_out_frame)
, dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInputData,
gradOutput.packed_accessor<scalar_t, 4>(),
indices.packed_accessor<int64_t, 4>(),
itime, iheight, iwidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
    const hipError_t err = hipGetLastError();
    TORCH_CHECK(err == hipSuccess,
      "max_pool3d_with_indices_backward_out_frame failed with error code ",
      err);
totalZ -= 65535;
offsetZ += 65535;
}
}
void max_pool3d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input, "input", 3 };
checkAllSameGPU("max_pool3d_with_indices_out_cuda",
{output_arg, indices_arg, input_arg});
// XXX [JIT] Pooling.cpp allows stride.empty().
// XXX [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
TORCH_CHECK(kernel_size.size() == 3 &&
(stride.empty() || stride.size() == 3) &&
(padding.size() == 1 || padding.size() == 3) &&
(dilation.size() == 1 || dilation.size() == 3),
"max_pool3d_with_indices: internal error: all IntArrayRef sizes must be 3");
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[2]);
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t itime = input.size(-3);
const int64_t iheight = input.size(-2);
const int64_t iwidth = input.size(-1);
const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode);
const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode);
const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode);
max_pool3d_with_indices_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth);
if (input.ndimension() == 4) {
output.resize_({ nslices, otime, oheight, owidth});
indices.resize_({nslices, otime, oheight, owidth});
}
else {
output.resize_({nbatch, nslices, otime, oheight, owidth});
indices.resize_({nbatch, nslices, otime, oheight, owidth});
}
Tensor work_input = input.contiguous();
Tensor work_output = output;
Tensor work_indices = indices;
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"max_pool3d_with_indices_out_frame",
[&]{
scalar_t *input_data = work_input.data<scalar_t>();
int64_t totalZ = otime * nslices * nbatch;
max_pool3d_with_indices_out_frame(
input_data, work_output, work_indices,
totalZ,
itime, iheight, iwidth,
otime, oheight, owidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
}
);
}
void max_pool3d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 };
TensorArg input_arg{ input, "input", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// XXX [JIT] Pooling.cpp allows stride.empty().
// XXX [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
TORCH_CHECK(kernel_size.size() == 3 &&
(stride.empty() || stride.size() == 3) &&
(padding.size() == 1 || padding.size() == 3) &&
(dilation.size() == 1 || dilation.size() == 3),
"max_pool3d_with_indices: internal error: all IntArrayRef sizes must be 3");
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for gradOutput");
// Resize and initialize result tensor.
gradInput.resize_as_(input);
gradInput.zero_();
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[2]);
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t otime = gradOutput.size(-3);
const int64_t oheight = gradOutput.size(-2);
const int64_t owidth = gradOutput.size(-1);
const int64_t itime = gradInput.size(-3);
const int64_t iheight = gradInput.size(-2);
const int64_t iwidth = gradInput.size(-1);
max_pool3d_with_indices_shape_check(
input,
gradOutput,
indices,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth);
Tensor work_grad_input = gradInput;
Tensor work_grad_output = gradOutput.contiguous();
Tensor work_indices = indices.contiguous();
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"max_pool3d_with_indices_backward_out_frame",
[&] {
const int64_t totalZ = otime * nslices * nbatch;
scalar_t *grad_input_data = work_grad_input.data<scalar_t>();
max_pool3d_with_indices_backward_out_frame(
grad_input_data, work_grad_output, work_indices,
totalZ,
itime, iheight, iwidth,
      oheight, owidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
}
);
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor, Tensor>(output, indices);
}
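// Illustrative call sketch (not part of this translation unit; the shapes and
// option values below are hypothetical and only document the argument order):
//   Tensor x = at::randn({2, 3, 8, 16, 16}, at::kCUDA); // N, C, T, H, W
//   Tensor out, idx;
//   std::tie(out, idx) = at::native::max_pool3d_with_indices_cuda(
//       x, /*kernel_size=*/{2, 2, 2}, /*stride=*/{2, 2, 2},
//       /*padding=*/{0}, /*dilation=*/{1}, /*ceil_mode=*/false);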
Tensor& max_pool3d_with_indices_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
Tensor max_pool3d_with_indices_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
auto gradInput = at::zeros_like(input);
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
} // at::native
} // at
| 57aa0530a44e0ca314952085241226a5d0f05e38.cu | #include <ATen/AccumulateType.h>
#include <ATen/native/Pool.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/cuda/detail/KernelUtils.h>
#include <THC/THCNumerics.cuh>
#include <c10/macros/Macros.h>
namespace at {
namespace native {
namespace {
__device__ inline int min(int a, int b) {
return a <= b ? a : b;
}
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_single_out_frame(
scalar_t* inputData,
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
if (oRow < output.size(2) && oColumn < output.size(3))
{
int tStart = oFrame * dT - pT;
int hStart = oRow * dH - pH;
int wStart = oColumn * dW - pW;
int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime);
int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight);
int wEnd = min(wStart + (kW - 1) * dilationW + 1, iwidth);
while(tStart < 0)
tStart += dilationT;
while(hStart < 0)
hStart += dilationH;
while(wStart < 0)
wStart += dilationW;
int index = 0;
int maxIndex = -1;
inputData += slice * itime * iheight * iwidth;
scalar_t max = THCNumerics<scalar_t>::min();
for (int t = tStart; t < tEnd; t += dilationT)
{
for (int h = hStart; h < hEnd; h += dilationH)
{
for (int w = wStart; w < wEnd; w += dilationW)
{
index = t * iheight * iwidth + h * iwidth + w;
scalar_t val = inputData[index];
if ((max < val) || THCNumerics<scalar_t>::isnan(val))
{
max = val;
maxIndex = index;
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
indices[slice][oFrame][oRow][oColumn] = maxIndex;
}
}
template <int KERNEL_WIDTH, typename scalar_t>
__global__ static void max_pool3d_with_indices_single_out_frame(
scalar_t* inputData,
PackedTensorAccessor<scalar_t, 4> output,
PackedTensorAccessor<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int kT, int kH,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % output.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / output.size(1); // output slice/feature
if (oRow < output.size(2) && oColumn < output.size(3))
{
int tStart = oFrame * dT - pT;
int hStart = oRow * dH - pH;
int wStart = oColumn * dW - pW;
int tEnd = min(tStart + (kT - 1) * dilationT + 1, itime);
int hEnd = min(hStart + (kH - 1) * dilationH + 1, iheight);
int wEnd = min(wStart + (KERNEL_WIDTH - 1) * dilationW + 1, iwidth);
while(tStart < 0)
tStart += dilationT;
while(hStart < 0)
hStart += dilationH;
while(wStart < 0)
wStart += dilationW;
int index = 0;
int maxIndex = -1;
scalar_t max = THCNumerics<scalar_t>::min();
for (int t = tStart; t < tEnd; t += dilationT)
{
for (int h = hStart; h < hEnd; h += dilationH)
{
for (int w = wStart; w < wEnd; w += dilationW)
{
index = t * iheight * iwidth + h * iwidth + w;
scalar_t val = inputData[slice * itime * iheight * iwidth + index];
if (max < val)
{
max = val;
maxIndex = index;
}
}
}
}
output[slice][oFrame][oRow][oColumn] = max;
indices[slice][oFrame][oRow][oColumn] = maxIndex;
}
}
#define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
max_pool3d_with_indices_single_out_frame<KW> \
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( \
input_data, \
output.packed_accessor<scalar_t, 4>(), \
indices.packed_accessor<int64_t, 4>(), \
itime, iheight, iwidth, \
kT, kH, \
dT, dH, dW, \
pT, pH, pW, \
dilationT, dilationH, dilationW, offsetZ); \
break
template <typename scalar_t>
void max_pool3d_with_indices_out_frame(
scalar_t* input_data,
const Tensor& output,
const Tensor& indices,
int totalZ,
int itime, int iheight, int iwidth,
int otime, int oheight, int owidth,
int kT, int kH, int kW,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
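    // gridDim.z is capped at 65535, so totalZ (= otime * nslices * nbatch) is
    // processed in slabs of at most 65535 planes; offsetZ tells the kernel
    // which slab of output planes this launch covers.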
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
switch (kW) {
UPDATE_OUTPUT_KERNEL_WIDTH(1);
UPDATE_OUTPUT_KERNEL_WIDTH(2);
UPDATE_OUTPUT_KERNEL_WIDTH(3);
UPDATE_OUTPUT_KERNEL_WIDTH(4);
UPDATE_OUTPUT_KERNEL_WIDTH(5);
UPDATE_OUTPUT_KERNEL_WIDTH(6);
UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
max_pool3d_with_indices_single_out_frame
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
input_data,
output.packed_accessor<scalar_t, 4>(),
indices.packed_accessor<int64_t, 4>(),
itime, iheight, iwidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
}
    cudaError_t err = cudaGetLastError();
    TORCH_CHECK(err == cudaSuccess,
      "max_pool3d_with_indices_out_frame failed with error code ",
      err);
totalZ -= 65535;
offsetZ += 65535;
}
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
template <typename scalar_t>
__global__ static void max_pool3d_with_indices_backward_single_out_frame(
scalar_t *gradInputData,
PackedTensorAccessor<scalar_t, 4> gradOutput,
PackedTensorAccessor<int64_t, 4> indices,
int itime, int iheight, int iwidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW,
int offsetZ)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = (blockIdx.z + offsetZ) % gradOutput.size(1); // output frame/time
int slice = (blockIdx.z + offsetZ) / gradOutput.size(1); // output slice/feature
if (oRow < gradOutput.size(2) && oColumn < gradOutput.size(3))
{
int maxIndex = indices[slice][oFrame][oRow][oColumn];
if (maxIndex != -1) {
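      // With overlapping windows (stride < kernel), several output cells can
      // record the same input element as their max, so the gradient scatter
      // must be atomic.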
atomicAdd(&gradInputData[slice * itime * iheight * iwidth + maxIndex],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
}
template <typename scalar_t>
void max_pool3d_with_indices_backward_out_frame(
scalar_t *gradInputData,
const Tensor& gradOutput,
const Tensor& indices,
int64_t totalZ,
int itime, int iheight, int iwidth,
int oheight, int owidth,
int dT, int dH, int dW,
int pT, int pH, int pW,
int dilationT, int dilationH, int dilationW)
{
int offsetZ = 0;
dim3 block(32, 8);
while (totalZ > 0) {
dim3 grid(cuda::ATenCeilDiv(owidth, static_cast<int>(block.x)),
cuda::ATenCeilDiv(oheight, static_cast<int>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_pool3d_with_indices_backward_single_out_frame
<<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(
gradInputData,
gradOutput.packed_accessor<scalar_t, 4>(),
indices.packed_accessor<int64_t, 4>(),
itime, iheight, iwidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
offsetZ);
    cudaError_t err = cudaGetLastError();
    TORCH_CHECK(err == cudaSuccess,
      "max_pool3d_with_indices_backward_out_frame failed with error code ",
      err);
totalZ -= 65535;
offsetZ += 65535;
}
}
void max_pool3d_with_indices_out_cuda_template(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg output_arg{ output, "output", 1 };
TensorArg indices_arg{ indices, "indices", 2 };
TensorArg input_arg{ input, "input", 3 };
checkAllSameGPU("max_pool3d_with_indices_out_cuda",
{output_arg, indices_arg, input_arg});
// XXX [JIT] Pooling.cpp allows stride.empty().
// XXX [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
TORCH_CHECK(kernel_size.size() == 3 &&
(stride.empty() || stride.size() == 3) &&
(padding.size() == 1 || padding.size() == 3) &&
(dilation.size() == 1 || dilation.size() == 3),
"max_pool3d_with_indices: internal error: all IntArrayRef sizes must be 3");
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[2]);
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t itime = input.size(-3);
const int64_t iheight = input.size(-2);
const int64_t iwidth = input.size(-1);
const int64_t otime = pooling_output_shape<int64_t>(itime, kT, pT, dT, dilationT, ceil_mode);
const int64_t oheight = pooling_output_shape<int64_t>(iheight, kH, pH, dH, dilationH, ceil_mode);
const int64_t owidth = pooling_output_shape<int64_t>(iwidth, kW, pW, dW, dilationW, ceil_mode);
max_pool3d_with_indices_shape_check(
input,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth);
if (input.ndimension() == 4) {
output.resize_({ nslices, otime, oheight, owidth});
indices.resize_({nslices, otime, oheight, owidth});
}
else {
output.resize_({nbatch, nslices, otime, oheight, owidth});
indices.resize_({nbatch, nslices, otime, oheight, owidth});
}
Tensor work_input = input.contiguous();
Tensor work_output = output;
Tensor work_indices = indices;
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_input = work_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_output = work_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(),
"max_pool3d_with_indices_out_frame",
[&]{
scalar_t *input_data = work_input.data<scalar_t>();
int64_t totalZ = otime * nslices * nbatch;
max_pool3d_with_indices_out_frame(
input_data, work_output, work_indices,
totalZ,
itime, iheight, iwidth,
otime, oheight, owidth,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
}
);
}
void max_pool3d_with_indices_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
const Tensor& indices,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
TensorArg gradInput_arg{ gradInput, "gradInput", 1 };
TensorArg gradOutput_arg{ gradOutput, "gradOutput", 2 };
TensorArg input_arg{ input, "input", 3 };
TensorArg indices_arg{ indices, "indices", 4 };
checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda",
{gradInput_arg, gradOutput_arg, input_arg, indices_arg});
// XXX [JIT] Pooling.cpp allows stride.empty().
// XXX [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
TORCH_CHECK(kernel_size.size() == 3 &&
(stride.empty() || stride.size() == 3) &&
(padding.size() == 1 || padding.size() == 3) &&
(dilation.size() == 1 || dilation.size() == 3),
"max_pool3d_with_indices: internal error: all IntArrayRef sizes must be 3");
TORCH_CHECK((input.ndimension() == 4 || input.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
TORCH_CHECK((gradOutput.ndimension() == 4 || gradOutput.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for gradOutput");
// Resize and initialize result tensor.
gradInput.resize_as_(input);
gradInput.zero_();
const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[2]);
const int pT = safe_downcast<int, int64_t>(padding[0]);
const int pH = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[1]);
const int pW = padding.size() == 1 ? pT : safe_downcast<int, int64_t>(padding[2]);
const int dilationT = safe_downcast<int, int64_t>(dilation[0]);
const int dilationH = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[1]);
const int dilationW = dilation.size() == 1 ? dilationT : safe_downcast<int, int64_t>(dilation[2]);
const int64_t nbatch = input.ndimension() == 5 ? input.size(-5) : 1;
const int64_t nslices = input.size(-4);
const int64_t otime = gradOutput.size(-3);
const int64_t oheight = gradOutput.size(-2);
const int64_t owidth = gradOutput.size(-1);
const int64_t itime = gradInput.size(-3);
const int64_t iheight = gradInput.size(-2);
const int64_t iwidth = gradInput.size(-1);
max_pool3d_with_indices_shape_check(
input,
gradOutput,
indices,
nslices,
kT, kH, kW,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW,
itime, iheight, iwidth,
otime, oheight, owidth);
Tensor work_grad_input = gradInput;
Tensor work_grad_output = gradOutput.contiguous();
Tensor work_indices = indices.contiguous();
if (input.ndimension() == 5) {
// Collapse batch and feature dimensions.
work_grad_input = work_grad_input.reshape({nbatch * nslices, itime, iheight, iwidth});
work_grad_output = work_grad_output.reshape({nbatch * nslices, otime, oheight, owidth});
work_indices = work_indices.reshape({nbatch * nslices, otime, oheight, owidth});
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(),
"max_pool3d_with_indices_backward_out_frame",
[&] {
const int64_t totalZ = otime * nslices * nbatch;
scalar_t *grad_input_data = work_grad_input.data<scalar_t>();
max_pool3d_with_indices_backward_out_frame(
grad_input_data, work_grad_output, work_indices,
totalZ,
itime, iheight, iwidth,
      oheight, owidth,
dT, dH, dW,
pT, pH, pW,
dilationT, dilationH, dilationW);
}
);
}
} // namespace
std::tuple<Tensor&, Tensor&> max_pool3d_with_indices_out_cuda(
Tensor& output,
Tensor& indices,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor&, Tensor&>(output, indices);
}
std::tuple<Tensor, Tensor> max_pool3d_with_indices_cuda(
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode)
{
Tensor output = at::empty({0}, input.options());
Tensor indices = at::empty({0}, input.options().dtype(kLong));
max_pool3d_with_indices_out_cuda_template(
output,
indices,
input,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return std::tuple<Tensor, Tensor>(output, indices);
}
Tensor& max_pool3d_with_indices_backward_out_cuda(
Tensor& gradInput,
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
Tensor max_pool3d_with_indices_backward_cuda(
const Tensor& gradOutput,
const Tensor& input,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef dilation,
bool ceil_mode,
const Tensor& indices)
{
auto gradInput = at::zeros_like(input);
max_pool3d_with_indices_backward_out_cuda_template(
gradInput,
gradOutput,
input,
indices,
kernel_size,
stride,
padding,
dilation,
ceil_mode);
return gradInput;
}
} // at::native
} // at
|
12b3f86ddb0d4c59965d69eecb1eb66018c5e0bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
const Dtype* scale, const int scale_dim, const int inner_dim,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index];
}
}
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
const Dtype* scale, const Dtype* bias,
const int scale_dim, const int inner_dim, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index] + bias[scale_index];
}
}
template <typename Dtype>
__global__ void TruncationLowerBounded(const int n, Dtype* in_out, Dtype lower_bound) {
CUDA_KERNEL_LOOP(index, n) {
in_out[index] = max(in_out[index], lower_bound);
}
}
template <typename Dtype>
__global__ void TruncationUpperBounded(const int n, Dtype* in_out, Dtype upper_bound) {
CUDA_KERNEL_LOOP(index, n) {
in_out[index] = min(in_out[index], upper_bound);
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
if (bottom[0] == top[0]) {
// in-place computation; need to store bottom data before overwriting it.
// Note that this is only necessary for Backward; we could skip this if not
// doing Backward, but Caffe currently provides no way of knowing whether
// we'll need to do Backward at the time of the Forward call.
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
temp_.mutable_gpu_data());
}
Dtype* scale_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->mutable_gpu_data();
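  // If configured, clamp the learned scale in place before it is applied, so
  // the parameter itself stays within [min_value, max_value] every forward.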
if (this->layer_param_.scale_param().has_min_value()) {
// NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( TruncationLowerBounded<Dtype>),
        dim3(CAFFE_GET_BLOCKS(scale_dim_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        scale_dim_, scale_data, this->layer_param_.scale_param().min_value());
CUDA_POST_KERNEL_CHECK;
}
if (this->layer_param_.scale_param().has_max_value()) {
// NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( TruncationUpperBounded<Dtype>),
        dim3(CAFFE_GET_BLOCKS(scale_dim_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        scale_dim_, scale_data, this->layer_param_.scale_param().max_value());
CUDA_POST_KERNEL_CHECK;
}
Dtype* top_data = top[0]->mutable_gpu_data();
if (bias_layer_) {
const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( ScaleBiasForward<Dtype>),
        dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
        top_data);
} else {
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( ScaleForward<Dtype>),
        dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bias_layer_ &&
this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
}
const bool scale_param = (bottom.size() == 1);
Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
if ((!scale_param && propagate_down[1]) ||
(scale_param && this->param_propagate_down_[0])) {
const Dtype* top_diff = top[0]->gpu_diff();
const bool in_place = (bottom[0] == top[0]);
const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
// Hack: store big eltwise product in bottom[0] diff, except in the special
// case where this layer itself does the eltwise product, in which case we
// can store it directly in the scale diff, and we're done.
// If we're computing in-place (and not doing eltwise computation), this
// hack doesn't work and we store the product in temp_.
const bool is_eltwise = (bottom[0]->count() == scale->count());
Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
(in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
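    // 'product' now holds top_diff * bottom_data elementwise; below it is
    // reduced over inner_dim_ and then outer_dim_ to match scale's shape.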
if (!is_eltwise) {
Dtype* sum_result = NULL;
if (inner_dim_ == 1) {
sum_result = product;
} else if (sum_result_.count() == 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
}
} else {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
sum_result = (outer_dim_ == 1) ?
scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
Dtype(1), product, sum_mult, Dtype(0), sum_result);
}
if (outer_dim_ != 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
if (scale_dim_ == 1) {
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
}
} else {
Dtype* scale_diff = scale->mutable_gpu_diff();
caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
Dtype(1), sum_result, sum_mult, Dtype(scale_param),
scale_diff);
}
}
}
}
if (propagate_down[0]) {
const int count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* scale_data = scale->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL(( ScaleForward<Dtype>),
        dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);
} // namespace caffe
| 12b3f86ddb0d4c59965d69eecb1eb66018c5e0bc.cu | #include <cfloat>
#include <vector>
#include "caffe/layers/scale_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ScaleForward(const int n, const Dtype* in,
const Dtype* scale, const int scale_dim, const int inner_dim,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index];
}
}
template <typename Dtype>
__global__ void ScaleBiasForward(const int n, const Dtype* in,
const Dtype* scale, const Dtype* bias,
const int scale_dim, const int inner_dim, Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
const int scale_index = (index / inner_dim) % scale_dim;
out[index] = in[index] * scale[scale_index] + bias[scale_index];
}
}
template <typename Dtype>
__global__ void TruncationLowerBounded(const int n, Dtype* in_out, Dtype lower_bound) {
CUDA_KERNEL_LOOP(index, n) {
in_out[index] = max(in_out[index], lower_bound);
}
}
template <typename Dtype>
__global__ void TruncationUpperBounded(const int n, Dtype* in_out, Dtype upper_bound) {
CUDA_KERNEL_LOOP(index, n) {
in_out[index] = min(in_out[index], upper_bound);
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = top[0]->count();
const Dtype* bottom_data = bottom[0]->gpu_data();
if (bottom[0] == top[0]) {
// in-place computation; need to store bottom data before overwriting it.
// Note that this is only necessary for Backward; we could skip this if not
// doing Backward, but Caffe currently provides no way of knowing whether
// we'll need to do Backward at the time of the Forward call.
caffe_copy(bottom[0]->count(), bottom[0]->gpu_data(),
temp_.mutable_gpu_data());
}
Dtype* scale_data =
((bottom.size() > 1) ? bottom[1] : this->blobs_[0].get())->mutable_gpu_data();
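  // If configured, clamp the learned scale in place before it is applied, so
  // the parameter itself stays within [min_value, max_value] every forward.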
if (this->layer_param_.scale_param().has_min_value()) {
// NOLINT_NEXT_LINE(whitespace/operators)
    TruncationLowerBounded<Dtype><<<CAFFE_GET_BLOCKS(scale_dim_), CAFFE_CUDA_NUM_THREADS>>>(
        scale_dim_, scale_data, this->layer_param_.scale_param().min_value());
CUDA_POST_KERNEL_CHECK;
}
if (this->layer_param_.scale_param().has_max_value()) {
// NOLINT_NEXT_LINE(whitespace/operators)
    TruncationUpperBounded<Dtype><<<CAFFE_GET_BLOCKS(scale_dim_), CAFFE_CUDA_NUM_THREADS>>>(
        scale_dim_, scale_data, this->layer_param_.scale_param().max_value());
CUDA_POST_KERNEL_CHECK;
}
Dtype* top_data = top[0]->mutable_gpu_data();
if (bias_layer_) {
const Dtype* bias_data = this->blobs_[bias_param_id_]->gpu_data();
ScaleBiasForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, scale_data, bias_data, scale_dim_, inner_dim_,
top_data);
} else {
ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, scale_data, scale_dim_, inner_dim_, top_data);
}
}
template <typename Dtype>
void ScaleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (bias_layer_ &&
this->param_propagate_down_[this->param_propagate_down_.size() - 1]) {
bias_layer_->Backward(top, bias_propagate_down_, bias_bottom_vec_);
}
const bool scale_param = (bottom.size() == 1);
Blob<Dtype>* scale = scale_param ? this->blobs_[0].get() : bottom[1];
if ((!scale_param && propagate_down[1]) ||
(scale_param && this->param_propagate_down_[0])) {
const Dtype* top_diff = top[0]->gpu_diff();
const bool in_place = (bottom[0] == top[0]);
const Dtype* bottom_data = (in_place ? &temp_ : bottom[0])->gpu_data();
// Hack: store big eltwise product in bottom[0] diff, except in the special
// case where this layer itself does the eltwise product, in which case we
// can store it directly in the scale diff, and we're done.
// If we're computing in-place (and not doing eltwise computation), this
// hack doesn't work and we store the product in temp_.
const bool is_eltwise = (bottom[0]->count() == scale->count());
Dtype* product = (is_eltwise ? scale->mutable_gpu_diff() :
(in_place ? temp_.mutable_gpu_data() : bottom[0]->mutable_gpu_diff()));
caffe_gpu_mul(top[0]->count(), top_diff, bottom_data, product);
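    // 'product' now holds top_diff * bottom_data elementwise; below it is
    // reduced over inner_dim_ and then outer_dim_ to match scale's shape.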
if (!is_eltwise) {
Dtype* sum_result = NULL;
if (inner_dim_ == 1) {
sum_result = product;
} else if (sum_result_.count() == 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(inner_dim_, product, sum_mult, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(inner_dim_, product, sum_mult, scale_diff);
}
} else {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
sum_result = (outer_dim_ == 1) ?
scale->mutable_gpu_diff() : sum_result_.mutable_gpu_data();
caffe_gpu_gemv(CblasNoTrans, sum_result_.count(), inner_dim_,
Dtype(1), product, sum_mult, Dtype(0), sum_result);
}
if (outer_dim_ != 1) {
const Dtype* sum_mult = sum_multiplier_.gpu_data();
if (scale_dim_ == 1) {
Dtype* scale_diff = scale->mutable_cpu_diff();
if (scale_param) {
Dtype result;
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, &result);
*scale_diff += result;
} else {
caffe_gpu_dot(outer_dim_, sum_mult, sum_result, scale_diff);
}
} else {
Dtype* scale_diff = scale->mutable_gpu_diff();
caffe_gpu_gemv(CblasTrans, outer_dim_, scale_dim_,
Dtype(1), sum_result, sum_mult, Dtype(scale_param),
scale_diff);
}
}
}
}
if (propagate_down[0]) {
const int count = top[0]->count();
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* scale_data = scale->gpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
ScaleForward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, scale_data, scale_dim_, inner_dim_, bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ScaleLayer);
} // namespace caffe
|
7422328a6b0f4f8a40c9b02541623e5942e26e6e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <hipcub/hipcub.hpp>
#include "caffe2/core/context_gpu.h"
#include "softmax_op.h"
#include "softmax_with_loss_op.h"
#include "spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
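  // One block per sample row; threads stride over the D classes. Both
  // block-wide sums reuse the same temp_storage, hence the __syncthreads()
  // separating them.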
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float weight = weights ? weights[i] : 1.0;
float sum = 0.0;
float total_prob = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
int idx = i * D + j;
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
total_prob += labeldata[idx];
sum += -logf(max(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
float tot = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
if (threadIdx.x == 0) {
Ydata[i] = tot;
// Sanity check
CUDA_KERNEL_ASSERT(abs(1.0 - total_prob_sum) < 1e-5f);
}
__syncthreads();
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
} else {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D];
}
}
}
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
#define DONTCARE (-1)
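// Label value marking spatial positions that are excluded from the loss.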
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -log(max(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
// Ignore-label, so set all gradients for this positions
// tp zero
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(max(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
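  // Row-wise (log-)softmax over an N x D matrix: subtract the row max for
  // numerical stability (broadcast via GEMM with the all-ones vector
  // sum_multiplier), exponentiate, reduce row sums into 'scales' with a GEMV,
  // then either divide by the sums or form logits - rowmax - log(scales).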
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs);
// Subtract the scale
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
hipLaunchKernelGGL(( SoftmaxNormalizeKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, probs, scales, probs);
} else {
hipLaunchKernelGGL(( SoftmaxNormalizeLogsKernel),
dim3(CAFFE_GET_BLOCKS(size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context->cuda_stream(), size, D, logits, rowmax, scales, probs);
}
}
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
avg_loss->Resize(vector<TIndex>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
hipLaunchKernelGGL(( LabelCrossEntropyKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->mutable_data<float>(), &context_);
} else {
hipLaunchKernelGGL(( ProbCrossEntropyKernel),
dim3(::min(N, CAFFE_MAXIMUM_NUM_BLOCKS)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(hipMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Sum of all losses
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Average of input batch size
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
hipLaunchKernelGGL(( SpatialSoftmaxKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(), N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<TIndex>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialCrossEntropyLossKernel),
dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
CUDA_CHECK(hipMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.Copy<float, CUDAContext, CUDAContext>(
P.size(), P.data<float>(), dX->mutable_data<float>());
}
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
hipLaunchKernelGGL(( LabelCrossEntropyGradientKernelWeighted),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
weights);
}
} else {
hipLaunchKernelGGL(( ProbCrossEntropyGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * D)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
D,
P.data<float>(),
T.data<float>(),
dX->mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(hipMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Scale by d_avg_loss / N
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
// Spatial mode, compute softmax for each x, y location
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
// Copy softmax probabilities into dX. All but the neuron
// corresponding to the correct label has gradient equaling e(x_j)
// which is the probability under softmax.
context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
hipLaunchKernelGGL(( SpatialSoftmaxLossGradientKernel),
dim3(CAFFE_GET_BLOCKS(N * W * H)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
CUDA_CHECK(hipMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
hipMemcpyDeviceToHost,
context_.cuda_stream()));
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
auto* P_data = P->mutable_data<float>();
if (N == 0) {
return true;
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P_data,
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
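// Per row, computes the softmax Jacobian-vector product
//   dX_i = Y_i * (dY_i - sum_j dY_j * Y_j),
// using a two-level block reduction for the inner product.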
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
auto* dX_data = dX->mutable_data<float>();
if (N == 0) {
return true;
}
hipLaunchKernelGGL(( softmax_gradient_kernel),
dim3(N),
dim3(SOFTMAX_NUM_THREADS),
0,
context_.cuda_stream(), D, Y.data<float>(), dY.data<float>(), dX_data);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
| 7422328a6b0f4f8a40c9b02541623e5942e26e6e.cu | #include <cfloat>
#include <cub/block/block_reduce.cuh>
#include "caffe2/core/context_gpu.h"
#include "softmax_op.h"
#include "softmax_with_loss_op.h"
#include "spatial_softmax_with_loss_op.h"
namespace caffe2 {
namespace {
__global__ void LabelCrossEntropyKernel(
const int N,
const int D,
const float* logPdata,
const int* labeldata,
const float* weights,
float* Ydata) {
CUDA_1D_KERNEL_LOOP(i, N) {
CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D);
float weight = weights ? weights[i] : 1.0;
Ydata[i] = -logPdata[i * D + labeldata[i]] * weight;
}
}
__global__ void LabelCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata) {
CUDA_1D_KERNEL_LOOP(i, N) {
int idx = i * D + labeldata[i];
dXdata[idx] = Pdata[idx] - 1.;
}
}
__global__ void LabelCrossEntropyGradientKernelWeighted(
const int N,
const int D,
const float* Pdata,
const int* labeldata,
float* dXdata,
const float* weights) {
CUDA_1D_KERNEL_LOOP(i, N * D) {
int row = i / D;
int d = i % D;
float val = Pdata[i] - 1.0 * (d == labeldata[row]);
float weight = weights[row];
dXdata[i] = val * weight;
}
}
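// In formula form, the weighted kernel above computes, per row i and class d,
//
//   dX[i, d] = w_i * (P[i, d] - 1{d == label_i})
//
// i.e. the standard softmax-cross-entropy gradient scaled by the per-example
// weight. The unweighted variant only adjusts the labeled entry because the
// caller copies P into dX beforehand.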
__global__ void ProbCrossEntropyKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
const float* weights,
float* Ydata) {
typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce;
__shared__ typename BlockReduce::TempStorage temp_storage;
for (int i = blockIdx.x; i < N; i += gridDim.x) {
float weight = weights ? weights[i] : 1.0;
float sum = 0.0;
float total_prob = 0.0;
for (int j = threadIdx.x; j < D; j += blockDim.x) {
int idx = i * D + j;
CUDA_KERNEL_ASSERT(labeldata[idx] >= 0);
total_prob += labeldata[idx];
sum += -logf(max(Pdata[idx], FLT_MIN)) * labeldata[idx] * weight;
}
float tot = BlockReduce(temp_storage).Sum(sum);
__syncthreads();
float total_prob_sum = BlockReduce(temp_storage).Sum(total_prob);
if (threadIdx.x == 0) {
Ydata[i] = tot;
// Sanity check
CUDA_KERNEL_ASSERT(abs(1.0 - total_prob_sum) < 1e-5f);
}
__syncthreads();
}
}
__global__ void ProbCrossEntropyGradientKernel(
const int N,
const int D,
const float* Pdata,
const float* labeldata,
float* dXdata,
const float* weights) {
if (weights == NULL) {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = Pdata[idx] - labeldata[idx];
}
} else {
CUDA_1D_KERNEL_LOOP(idx, N * D) {
dXdata[idx] = (Pdata[idx] - labeldata[idx]) * weights[idx / D];
}
}
}
__global__ void SpatialSoftmaxKernel(
const int num,
const int D,
const int W,
const int H,
const float* Xdata,
float* Pdata) {
CUDA_1D_KERNEL_LOOP(index, num * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
// Subtract max on each cell for numerical reasons
float max_val = -FLT_MAX;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
max_val = max(max_val, Xdata[idx]);
}
// Exponentiate
float expsum = 0.0f;
for(int c = 0; c < D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
float expx = exp(Xdata[idx] - max_val);
Pdata[idx] = expx;
expsum += expx;
}
// Normalize
for(int c=0; c<D; ++c) {
int idx = i * (H * W * D) + c * (H * W) + y * W + x;
Pdata[idx] /= expsum;
}
}
}
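// The max subtraction above is the standard stable-softmax identity:
//
//   softmax(x)_c = exp(x_c) / sum_j(exp(x_j))
//                = exp(x_c - m) / sum_j(exp(x_j - m)),  m = max_j(x_j)
//
// For example, logits (1000, 1001) overflow float under a naive exp(), while
// the shifted form evaluates exp(-1) and exp(0) and yields (0.269, 0.731).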
#define DONTCARE (-1)
__global__ void SpatialCrossEntropyLossKernel(
const int N,
const int D,
const int W,
const int H,
const float* Pdata,
const int* label_data,
const float* weights,
float* loss_data,
float* weight_data) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
CUDA_KERNEL_ASSERT(label >= 0 && label < D);
float weight = (weights == NULL ? 1.0 : weights[index]);
loss_data[index] = -log(max(
Pdata[i * W * H * D + label * W * H + y * W + x], 1e-20f)) * weight;
weight_data[index] = weight;
} else {
loss_data[index] = 0;
weight_data[index] = 0;
}
}
}
__global__ void SpatialSoftmaxLossGradientKernel(const int N, const int D,
const int W, const int H, const int* label_data, const float* weights,
float* dX_data, float* weights_) {
CUDA_1D_KERNEL_LOOP(index, N * W * H) {
int x = index % W;
int y = (index / W) % H;
int i = index / W / H;
const int label = static_cast<int>(label_data[index]);
if (label != DONTCARE) {
int data_idx = i * (H * W * D) + label * (H * W) + y * W + x;
dX_data[data_idx] -= 1.0;
if (weights != NULL) {
float weight = weights[index];
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] *= weight;
}
weights_[index] = weight;
} else {
weights_[index] = 1.0;
}
} else {
      // Ignore label: set all gradients at this position to zero.
for (int c = 0; c < D; ++c) {
int data_idx = i * (H * W * D) + c * (H * W) + y * W + x;
dX_data[data_idx] = 0.0;
}
weights_[index] = 0.0;
}
}
}
__global__ void SoftmaxNormalizeLogsKernel(
const int nthreads,
const int D,
const float* logits,
const float* rowmax,
const float* scales,
float* out_log) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out_log[index] = logits[index] - rowmax[n] - logf(max(scales[n], FLT_MIN));
}
}
__global__ void SoftmaxNormalizeKernel(
const int nthreads,
const int D,
const float* probs,
const float* scales,
float* out) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index / D;
out[index] = probs[index] / scales[n];
}
}
void Softmax(
const int N,
const int D,
const float* logits,
const float* sum_multiplier,
float* scales,
float* rowmax,
float* probs,
bool log_softmax,
CUDAContext* context) {
const int size = N * D;
math::RowwiseMax<float, CUDAContext>(N, D, logits, rowmax, context);
// Put the intermediate result X - max(X) into Y
context->Copy<float, CUDAContext, CUDAContext>(size, logits, probs);
  // Subtract the row-wise max from every column via a rank-1 update
math::Gemm<float, CUDAContext>(
CblasNoTrans,
CblasNoTrans,
N,
D,
1,
-1,
rowmax,
sum_multiplier,
1,
probs,
context);
// Exponentiation
math::Exp<float, CUDAContext>(size, probs, probs, context);
// Sum exponentiated values
math::Gemv<float, CUDAContext>(CblasNoTrans, N, D, 1, probs, sum_multiplier,
0, scales, context);
// Normalize
if (!log_softmax) {
SoftmaxNormalizeKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, probs, scales, probs);
} else {
SoftmaxNormalizeLogsKernel<<<
CAFFE_GET_BLOCKS(size),
CAFFE_CUDA_NUM_THREADS,
0,
context->cuda_stream()>>>(size, D, logits, rowmax, scales, probs);
}
}
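// The two BLAS calls in the helper above stand in for custom broadcast
// kernels: the rank-1 Gemm computes probs += (-1) * rowmax * ones^T, i.e. it
// subtracts rowmax[n] from every entry of row n, and the Gemv with the same
// ones vector produces the per-row sums scales[n] = sum_d(probs[n, d]).
// A small sketch with N = 2, D = 3:
//
//   rowmax = [3, 5]^T, ones = [1, 1, 1]^T
//   rowmax * ones^T = [[3, 3, 3],
//                      [5, 5, 5]]   <- subtracted from probs row by row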
} // namespace
template<>
bool SoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
avg_loss->Resize(vector<TIndex>());
if (losses_.size() != N) {
losses_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
losses_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P->mutable_data<float>(),
!label_prob_mode_, // logarithmic output
&context_);
// Compute label xent loss per example
if (!label_prob_mode_) {
LabelCrossEntropyKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<int>(),
weights,
losses_.mutable_data<float>());
// Since we had logarithmic output, we need to exponentiate
// them again.
math::Exp<float, CUDAContext>(
N * D, P->data<float>(), P->mutable_data<float>(), &context_);
} else {
ProbCrossEntropyKernel<<<
std::min(N, CAFFE_MAXIMUM_NUM_BLOCKS),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P->data<float>(),
T.data<float>(),
weights,
losses_.mutable_data<float>());
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
}
// Sum of all losses
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
  // Normalize by the total weight (which defaults to the batch size N)
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
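// In formula form, the scalar produced above is
//
//   avg_loss = (scale_ / W) * sum_i(w_i * xent_i),  W = sum_i(w_i) or N
//
// where xent_i is -log(p[i, t_i]) in hard-label mode and the full cross
// entropy sum_d(t[i, d] * -log(p[i, d])) in label_prob mode.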
template <>
bool SpatialSoftmaxWithLossOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
auto* P = Output(0); // Probabilities from softmax
auto* avg_loss = Output(1); // Average loss
const float* weights = (InputSize() > 2 ? Input(2).data<float>() : NULL);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
P->ResizeLike(X);
total_weight_ptr_.Resize(1);
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
CAFFE_ENFORCE_EQ(T.dim32(0), N);
int H = X.dim32(2);
int W = X.dim32(3);
if (losses_.size() != N * W * H) {
losses_.Resize(N * W * H);
}
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Xdata = X.data<float>();
float* Pdata = P->mutable_data<float>();
// Softmax for each x,y location
SpatialSoftmaxKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(N, D, W, H, Xdata, Pdata);
// Cross entropy
avg_loss->Resize(vector<TIndex>());
float* avg_loss_data = avg_loss->mutable_data<float>();
math::Set<float, CUDAContext>(1, 0.0f, avg_loss_data, &context_);
const int* label_data = T.data<int>();
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialCrossEntropyLossKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
W,
H,
P->data<float>(),
label_data,
weights,
losses_.mutable_data<float>(),
weights_.mutable_data<float>());
// Somewhat awkward scalar passing from device to host
float h_total_weight;
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
math::Sum<float, CUDAContext>(
losses_.size(), losses_.data<float>(), avg_loss_data, &context_, &scratch_);
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
1, scale_ / h_total_weight, avg_loss_data, avg_loss_data, &context_);
}
return true;
}
template <>
bool SoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(axis_);
int N, D;
N = X.size_to_dim(canonical_axis); // batch size
D = X.size_from_dim(canonical_axis);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
if (label_prob_mode_) {
CAFFE_ENFORCE_GE(T.ndim(), 2);
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), D);
} else {
if (T.ndim() == canonical_axis) {
CAFFE_ENFORCE_EQ(T.size(), N);
} else {
CAFFE_ENFORCE_EQ(T.size_to_dim(canonical_axis), N);
CAFFE_ENFORCE_EQ(T.size_from_dim(canonical_axis), 1);
}
}
// Subtract 1 from labeled positions
if (!label_prob_mode_) {
if (weights == nullptr) {
// Copy softmax probabilities into dX
if (!only_loss_) {
context_.Copy<float, CUDAContext, CUDAContext>(
P.size(), P.data<float>(), dX->mutable_data<float>());
}
LabelCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, P.data<float>(), T.data<int>(), dX->mutable_data<float>());
} else {
// Weighted version gets the Pdata values internally
LabelCrossEntropyGradientKernelWeighted<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<int>(),
dX->mutable_data<float>(),
weights);
}
} else {
ProbCrossEntropyGradientKernel<<<
CAFFE_GET_BLOCKS(N * D),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
D,
P.data<float>(),
T.data<float>(),
dX->mutable_data<float>(),
weights);
}
float total_weight = N;
if (weights) {
// Sum weights
math::Sum<float, CUDAContext>(
N, weights, total_weight_ptr_.mutable_data<float>(), &context_, &scratch_);
CUDA_CHECK(cudaMemcpyAsync(
&total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
}
  // Scale by scale_ / total_weight, then by the incoming d_avg_loss
if (total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
template <>
bool SpatialSoftmaxWithLossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0); // Logits
auto& T = Input(1); // Labels / targets
// Input(2) is weights, if given
auto& P = Input(InputSize() - 2); // Probabilities from softmax
auto& d_avg_loss = Input(InputSize() - 1); // Gradient w.r.t. avg loss
const float* weights = (InputSize() > 4 ? Input(2).data<float>() : NULL);
auto* dX = Output(0);
dX->ResizeLike(X);
const auto canonical_axis = X.canonical_axis_index(1);
int N, D;
N = X.dim32(0);
D = X.dim32(1);
if (only_loss_) {
// Memory saving trick to share the buffer with the softmax output.
// Softmax output is thus overwritten.
dX->ShareData(P);
}
total_weight_ptr_.Resize(1);
// Spatial mode, compute softmax for each x, y location
CAFFE_ENFORCE_EQ(X.ndim(), 4);
CAFFE_ENFORCE_EQ(T.ndim(), 3);
int H = X.dim32(2);
int W = X.dim32(3);
dX->ResizeLike(X);
if (weights_.size() != N * W * H) {
weights_.Resize(N * W * H);
}
const float* Pdata = P.data<float>();
float* dX_data = dX->mutable_data<float>();
const int* label_data = T.data<int>();
const float* d_avg_loss_data = d_avg_loss.data<float>();
// Copy softmax probabilities into dX. All but the neuron
// corresponding to the correct label has gradient equaling e(x_j)
// which is the probability under softmax.
context_.Copy<float, CUDAContext, CUDAContext>(P.size(), Pdata, dX_data);
math::Set<float, CUDAContext>(
1, 0.0f, total_weight_ptr_.mutable_data<float>(), &context_);
SpatialSoftmaxLossGradientKernel<<<
CAFFE_GET_BLOCKS(N * W * H),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N, D, W, H, label_data, weights, dX_data, weights_.mutable_data<float>());
math::Sum<float, CUDAContext>(
weights_.size(),
weights_.data<float>(),
total_weight_ptr_.mutable_data<float>(),
&context_,
&scratch_);
// Somewhat awkward scalar passing from device to host
float h_total_weight;
CUDA_CHECK(cudaMemcpyAsync(
&h_total_weight,
total_weight_ptr_.data<float>(),
sizeof(float),
cudaMemcpyDeviceToHost,
context_.cuda_stream()));
// Final scaling
if (h_total_weight > 0) {
math::Scale<float, CUDAContext>(
dX->size(),
scale_ / h_total_weight,
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
}
math::Scale<float, CUDAContext>(
dX->size(),
d_avg_loss.data<float>(),
dX->data<float>(),
dX->mutable_data<float>(),
&context_);
return true;
}
// Implementation for the CUDA context.
template <>
bool SoftmaxOp<float, CUDAContext>::RunOnDevice() {
auto& X = Input(0);
auto* P = Output(0);
const auto canonical_axis = X.canonical_axis_index(axis_);
const int N = X.size_to_dim(canonical_axis);
const int D = X.size_from_dim(canonical_axis);
P->ResizeLike(X);
auto* P_data = P->mutable_data<float>();
if (N == 0) {
return true;
}
if (sum_multiplier_.size() != D) {
sum_multiplier_.Resize(D);
math::Set<float, CUDAContext>(
D, 1.f, sum_multiplier_.mutable_data<float>(), &context_);
}
if (scale_.size() != N) {
scale_.Resize(N);
}
if (rowmax_.size() != N) {
rowmax_.Resize(N);
}
Softmax(
N,
D,
X.data<float>(),
sum_multiplier_.data<float>(),
scale_.mutable_data<float>(),
rowmax_.mutable_data<float>(),
P_data,
false,
&context_);
return true;
}
#define SOFTMAX_NUM_THREADS 128
// The softmax gradient kernel. This kernel has to be called with the number of
// threads per block being no more than SOFTMAX_NUM_THREADS.
namespace {
__global__ void softmax_gradient_kernel(
const int dim,
const float* Y,
const float* dY,
float* dX) {
Y += blockIdx.x * dim;
dY += blockIdx.x * dim;
dX += blockIdx.x * dim;
const int idx = threadIdx.x;
__shared__ float reduction_buffer[SOFTMAX_NUM_THREADS];
float tmp;
// A two-level reduction to compute the inner products.
tmp = 0;
for (int i = idx; i < dim; i += blockDim.x) {
tmp += dY[i] * Y[i];
}
reduction_buffer[idx] = tmp;
__syncthreads();
if (idx == 0) {
tmp = reduction_buffer[0];
for (int i = 1; i < blockDim.x; ++i)
tmp += reduction_buffer[i];
reduction_buffer[0] = tmp;
}
__syncthreads();
// Compute gradient.
tmp = reduction_buffer[0];
for (int i = idx; i < dim; i += blockDim.x) {
dX[i] = Y[i] * (dY[i] - tmp);
}
}
} // namespace
template <>
bool SoftmaxGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(0);
auto& dY = Input(1);
auto* dX = Output(0);
const auto canonical_axis = Y.canonical_axis_index(axis_);
const int N = Y.size_to_dim(canonical_axis);
const int D = Y.size_from_dim(canonical_axis);
dX->ResizeLike(Y);
auto* dX_data = dX->mutable_data<float>();
if (N == 0) {
return true;
}
softmax_gradient_kernel<<<
N,
SOFTMAX_NUM_THREADS,
0,
context_.cuda_stream()>>>(D, Y.data<float>(), dY.data<float>(), dX_data);
return true;
}
REGISTER_CUDA_OPERATOR(SoftmaxWithLoss,
SoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxWithLossGradient,
SoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLoss,
SpatialSoftmaxWithLossOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(
SpatialSoftmaxWithLossGradient,
SpatialSoftmaxWithLossGradientOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(Softmax, SoftmaxOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SoftmaxGradient, SoftmaxGradientOp<float, CUDAContext>);
} // namespace caffe2
|
3ab55889f42145d586b9fb8c9c0f2c8b3236a4cf.hip | // !!! This is a file automatically generated by hipify!!!
// includes, cuda
#include <hip/hip_runtime.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include "imageKernels.cuh"
#define BLOCK_DIM 8
texture<float, 2, hipReadModeElementType> tex_ref;
hipChannelFormatDesc tex_channel_desc;
unsigned char *d_image_data = nullptr;
unsigned int image_width;
unsigned int image_height;
unsigned int image_bpp; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int image_pitch;
size_t tex_pitch;
float *d_linear_pitch_texture_data = nullptr;
hipArray *d_array_texture_data = nullptr;
uchar3 *dst_tex_data;
KernelSetting square_ks;
float *d_output_data = nullptr;
__constant__ int sobel_x_filter[] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };
__constant__ int sobel_y_filter[] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };
template<bool NormalizeTexel>__global__ void float_heighmap_texture_to_normalmap(const unsigned int tex_width, const unsigned int tex_height, const unsigned int dst_pitch, uchar3* dst)
{
const auto col = (threadIdx.x + blockIdx.x * blockDim.x);
const auto row = (threadIdx.y + blockIdx.y * blockDim.y);
	// Guard threads that fall outside the image (the grid is rounded up to whole blocks).
	if (col >= tex_width || row >= tex_height) return;
	float x = 0, y = 0, z = 0;
	z = 0.5;
for (unsigned int i = 0; i < 3; i++) {
for (unsigned int j = 0; j < 3; j++) {
const float texel = tex2D(tex_ref, col + (j - 1), row + (i - 1));
x += texel * sobel_x_filter[j + i * 3];
y += texel * sobel_y_filter[j + i * 3];
}
}
x = x / 9;
y = y / 9;
if (NormalizeTexel) {
const auto distance = sqrt(x * x + y * y + z * z);
x /= distance;
y /= distance;
z /= distance;
}
uchar3 rgb_texel;
uchar3 bgr_texel;
rgb_texel.x = (x + 1) * 127.5;
rgb_texel.y = (y + 1) * 127.5;
rgb_texel.z = z * 255;
bgr_texel.x = rgb_texel.z;
bgr_texel.y = rgb_texel.y;
bgr_texel.z = rgb_texel.x;
	// dst_pitch is in bytes, so step between rows through a byte pointer to
	// honor the alignment padding added by hipMallocPitch.
	uchar3* dst_row = reinterpret_cast<uchar3*>(reinterpret_cast<char*>(dst) + row * dst_pitch);
	dst_row[col] = rgb_texel;
}
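// The encoding above maps normal components from [-1, 1] into RGB bytes:
// r = (x + 1) * 127.5, g = (y + 1) * 127.5, b = z * 255. A flat region
// (x = y = 0) normalizes to z = 1 and encodes as (127, 127, 255), the
// familiar normal-map blue. Note that bgr_texel is prepared for a BGR
// destination but the kernel currently writes rgb_texel.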
#pragma region STEP 1
//TASK: Load the input image and store loaded data in DEVICE memory (dSrcImageData)
void load_source_image(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = ImageManager::GenericLoader(image_file_name, 0);
image_width = FreeImage_GetWidth(tmp);
image_height = FreeImage_GetHeight(tmp);
image_bpp = FreeImage_GetBPP(tmp);
image_pitch = FreeImage_GetPitch(tmp);
hipMalloc(reinterpret_cast<void**>(&d_image_data), image_pitch * image_height * image_bpp / 8);
hipMemcpy(d_image_data, FreeImage_GetBits(tmp), image_pitch * image_height * image_bpp / 8, hipMemcpyHostToDevice);
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
#pragma endregion
#pragma region STEP 2
//TASK: Create a texture based on the source image. The input images can have variable BPP (Bits Per Pixel), but finally any such image will be converted into the floating-point texture using
// the colorToFloat kernel.
void create_src_texure()
{
//Floating Point Texture Data
hipMallocPitch(reinterpret_cast<void**>(&d_linear_pitch_texture_data), &tex_pitch, image_width * sizeof(float), image_height);
//Converts custom image data to float and stores result in the float_pitch_linear_data
switch (image_bpp)
{
case 8: colorToFloat<8> << <square_ks.dimGrid, square_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, tex_pitch / sizeof(float), d_linear_pitch_texture_data); break;
case 16: colorToFloat<16> << <square_ks.dimGrid, square_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, tex_pitch / sizeof(float), d_linear_pitch_texture_data); break;
case 24: colorToFloat<24> << <square_ks.dimGrid, square_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, tex_pitch / sizeof(float), d_linear_pitch_texture_data); break;
case 32: colorToFloat<32> << <square_ks.dimGrid, square_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, tex_pitch / sizeof(float), d_linear_pitch_texture_data); break;
}
//checkDeviceMatrix<float>(dLinearPitchTextureData, texPitch, imageHeight, imageWidth, "", "");
//Texture settings
tex_channel_desc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
tex_ref.normalized = false;
tex_ref.filterMode = hipFilterModePoint;
tex_ref.addressMode[0] = hipAddressModeClamp;
tex_ref.addressMode[1] = hipAddressModeClamp;
hipBindTexture2D(nullptr, &tex_ref, d_linear_pitch_texture_data, &tex_channel_desc, image_width, image_height, tex_pitch);
}
#pragma endregion
#pragma region STEP 3
//TASK: Convert the input image into a normal map. Use the bound texture (tex_ref).
void create_normal_map()
{
	//TODO: Allocate pitched memory dst_tex_data to store the output texture
	checkCudaErrors(hipMallocPitch(reinterpret_cast<void**>(&dst_tex_data), &tex_pitch, image_width * sizeof(uchar3), image_height));
	//TODO: Call the kernel that creates the normal map.
float_heighmap_texture_to_normalmap<true> << <square_ks.dimGrid, square_ks.dimBlock >> > (image_width, image_height, tex_pitch, dst_tex_data);
//check_data<uchar3>::checkDeviceMatrix(dstTexData, imageHeight, texPitch / sizeof(uchar3), true, "%hhu %hhu %hhu | ", "Result of Linear Pitch Text");
}
#pragma endregion
#pragma region STEP 4
//TASK: Save output image (normal map)
void save_tex_image(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = FreeImage_Allocate(image_width, image_height, 24);
checkCudaErrors(hipMemcpy2D(FreeImage_GetBits(tmp), FreeImage_GetPitch(tmp), dst_tex_data, tex_pitch, image_width * 3, image_height, hipMemcpyDeviceToHost));
//FreeImage_Save(FIF_PNG, tmp, imageFileName, 0);
ImageManager::GenericWriter(tmp, image_file_name, FIF_PNG);
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
#pragma endregion
void release_memory()
{
hipUnbindTexture(tex_ref);
if (d_image_data != nullptr)
hipFree(d_image_data);
if (d_linear_pitch_texture_data != nullptr)
hipFree(d_linear_pitch_texture_data);
if (d_array_texture_data)
hipFreeArray(d_array_texture_data);
if (d_output_data)
hipFree(d_output_data);
}
void exercise6()
{
//STEP 1
load_source_image("terrain3Kx3K.tif");
//TODO: Setup the kernel settings
square_ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
square_ks.blockSize = BLOCK_DIM * BLOCK_DIM;
square_ks.dimGrid = dim3((image_width + BLOCK_DIM - 1) / BLOCK_DIM, (image_height + BLOCK_DIM - 1) / BLOCK_DIM, 1);
	//Step 2 - create heightmap texture stored in the linear pitch memory
create_src_texure();
//Step 3 - create the normal map
create_normal_map();
//Step 4 - save the normal map
save_tex_image("normalMap.bmp");
release_memory();
}
| 3ab55889f42145d586b9fb8c9c0f2c8b3236a4cf.cu | // includes, cuda
#include <cuda_runtime.h>
#include <cudaDefs.h>
#include <imageManager.h>
#include "imageKernels.cuh"
#define BLOCK_DIM 8
texture<float, 2, cudaReadModeElementType> tex_ref;
cudaChannelFormatDesc tex_channel_desc;
unsigned char *d_image_data = nullptr;
unsigned int image_width;
unsigned int image_height;
unsigned int image_bpp; //Bits Per Pixel = 8, 16, 24, or 32 bit
unsigned int image_pitch;
size_t tex_pitch;
float *d_linear_pitch_texture_data = nullptr;
cudaArray *d_array_texture_data = nullptr;
uchar3 *dst_tex_data;
KernelSetting square_ks;
float *d_output_data = nullptr;
__constant__ int sobel_x_filter[] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };
__constant__ int sobel_y_filter[] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };
template<bool NormalizeTexel>__global__ void float_heighmap_texture_to_normalmap(const unsigned int tex_width, const unsigned int tex_height, const unsigned int dst_pitch, uchar3* dst)
{
const auto col = (threadIdx.x + blockIdx.x * blockDim.x);
const auto row = (threadIdx.y + blockIdx.y * blockDim.y);
	// Guard threads that fall outside the image (the grid is rounded up to whole blocks).
	if (col >= tex_width || row >= tex_height) return;
	float x = 0, y = 0, z = 0;
	z = 0.5;
for (unsigned int i = 0; i < 3; i++) {
for (unsigned int j = 0; j < 3; j++) {
const float texel = tex2D(tex_ref, col + (j - 1), row + (i - 1));
x += texel * sobel_x_filter[j + i * 3];
y += texel * sobel_y_filter[j + i * 3];
}
}
x = x / 9;
y = y / 9;
if (NormalizeTexel) {
const auto distance = sqrt(x * x + y * y + z * z);
x /= distance;
y /= distance;
z /= distance;
}
uchar3 rgb_texel;
uchar3 bgr_texel;
rgb_texel.x = (x + 1) * 127.5;
rgb_texel.y = (y + 1) * 127.5;
rgb_texel.z = z * 255;
bgr_texel.x = rgb_texel.z;
bgr_texel.y = rgb_texel.y;
bgr_texel.z = rgb_texel.x;
	// dst_pitch is in bytes, so step between rows through a byte pointer to
	// honor the alignment padding added by cudaMallocPitch.
	uchar3* dst_row = reinterpret_cast<uchar3*>(reinterpret_cast<char*>(dst) + row * dst_pitch);
	dst_row[col] = rgb_texel;
}
#pragma region STEP 1
//TASK: Load the input image and store loaded data in DEVICE memory (dSrcImageData)
void load_source_image(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = ImageManager::GenericLoader(image_file_name, 0);
image_width = FreeImage_GetWidth(tmp);
image_height = FreeImage_GetHeight(tmp);
image_bpp = FreeImage_GetBPP(tmp);
image_pitch = FreeImage_GetPitch(tmp);
cudaMalloc(reinterpret_cast<void**>(&d_image_data), image_pitch * image_height * image_bpp / 8);
cudaMemcpy(d_image_data, FreeImage_GetBits(tmp), image_pitch * image_height * image_bpp / 8, cudaMemcpyHostToDevice);
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
#pragma endregion
#pragma region STEP 2
//TASK: Create a texture based on the source image. The input images can have variable BPP (Bits Per Pixel), but finally any such image will be converted into the floating-point texture using
// the colorToFloat kernel.
void create_src_texure()
{
//Floating Point Texture Data
cudaMallocPitch(reinterpret_cast<void**>(&d_linear_pitch_texture_data), &tex_pitch, image_width * sizeof(float), image_height);
//Converts custom image data to float and stores result in the float_pitch_linear_data
switch (image_bpp)
{
case 8: colorToFloat<8> << <square_ks.dimGrid, square_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, tex_pitch / sizeof(float), d_linear_pitch_texture_data); break;
case 16: colorToFloat<16> << <square_ks.dimGrid, square_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, tex_pitch / sizeof(float), d_linear_pitch_texture_data); break;
case 24: colorToFloat<24> << <square_ks.dimGrid, square_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, tex_pitch / sizeof(float), d_linear_pitch_texture_data); break;
case 32: colorToFloat<32> << <square_ks.dimGrid, square_ks.dimBlock >> > (d_image_data, image_width, image_height, image_pitch, tex_pitch / sizeof(float), d_linear_pitch_texture_data); break;
}
//checkDeviceMatrix<float>(dLinearPitchTextureData, texPitch, imageHeight, imageWidth, "", "");
//Texture settings
tex_channel_desc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
tex_ref.normalized = false;
tex_ref.filterMode = cudaFilterModePoint;
tex_ref.addressMode[0] = cudaAddressModeClamp;
tex_ref.addressMode[1] = cudaAddressModeClamp;
cudaBindTexture2D(nullptr, &tex_ref, d_linear_pitch_texture_data, &tex_channel_desc, image_width, image_height, tex_pitch);
}
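// Texture references like tex_ref above are deprecated in recent CUDA
// releases; a rough texture-object equivalent is sketched below (kept as a
// comment -- the descriptor fields simply mirror the settings used above):
//
//   cudaResourceDesc res_desc{};
//   res_desc.resType = cudaResourceTypePitch2D;
//   res_desc.res.pitch2D.devPtr = d_linear_pitch_texture_data;
//   res_desc.res.pitch2D.desc = tex_channel_desc;
//   res_desc.res.pitch2D.width = image_width;
//   res_desc.res.pitch2D.height = image_height;
//   res_desc.res.pitch2D.pitchInBytes = tex_pitch;
//   cudaTextureDesc tex_desc{};
//   tex_desc.addressMode[0] = cudaAddressModeClamp;
//   tex_desc.addressMode[1] = cudaAddressModeClamp;
//   tex_desc.filterMode = cudaFilterModePoint;
//   tex_desc.readMode = cudaReadModeElementType;
//   tex_desc.normalizedCoords = 0;
//   cudaTextureObject_t tex = 0;
//   cudaCreateTextureObject(&tex, &res_desc, &tex_desc, nullptr);
//
// The kernel would then take `tex` as a parameter and sample with
// tex2D<float>(tex, col, row).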
#pragma endregion
#pragma region STEP 3
//TASK: Convert the input image into a normal map. Use the bound texture (tex_ref).
void create_normal_map()
{
	//TODO: Allocate pitched memory dst_tex_data to store the output texture
	checkCudaErrors(cudaMallocPitch(reinterpret_cast<void**>(&dst_tex_data), &tex_pitch, image_width * sizeof(uchar3), image_height));
	//TODO: Call the kernel that creates the normal map.
float_heighmap_texture_to_normalmap<true> << <square_ks.dimGrid, square_ks.dimBlock >> > (image_width, image_height, tex_pitch, dst_tex_data);
//check_data<uchar3>::checkDeviceMatrix(dstTexData, imageHeight, texPitch / sizeof(uchar3), true, "%hhu %hhu %hhu | ", "Result of Linear Pitch Text");
}
#pragma endregion
#pragma region STEP 4
//TASK: Save output image (normal map)
void save_tex_image(const char* image_file_name)
{
FreeImage_Initialise();
const auto tmp = FreeImage_Allocate(image_width, image_height, 24);
checkCudaErrors(cudaMemcpy2D(FreeImage_GetBits(tmp), FreeImage_GetPitch(tmp), dst_tex_data, tex_pitch, image_width * 3, image_height, cudaMemcpyDeviceToHost));
//FreeImage_Save(FIF_PNG, tmp, imageFileName, 0);
ImageManager::GenericWriter(tmp, image_file_name, FIF_PNG);
FreeImage_Unload(tmp);
FreeImage_DeInitialise();
}
#pragma endregion
void release_memory()
{
cudaUnbindTexture(tex_ref);
if (d_image_data != nullptr)
cudaFree(d_image_data);
if (d_linear_pitch_texture_data != nullptr)
cudaFree(d_linear_pitch_texture_data);
if (d_array_texture_data)
cudaFreeArray(d_array_texture_data);
if (d_output_data)
cudaFree(d_output_data);
}
void exercise6()
{
//STEP 1
load_source_image("terrain3Kx3K.tif");
//TODO: Setup the kernel settings
square_ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1);
square_ks.blockSize = BLOCK_DIM * BLOCK_DIM;
square_ks.dimGrid = dim3((image_width + BLOCK_DIM - 1) / BLOCK_DIM, (image_height + BLOCK_DIM - 1) / BLOCK_DIM, 1);
	//Step 2 - create heightmap texture stored in the linear pitch memory
create_src_texure();
//Step 3 - create the normal map
create_normal_map();
//Step 4 - save the normal map
save_tex_image("normalMap.bmp");
release_memory();
}
|
db3d2fecbc3f53a09585768deb83247d650fb598.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file np_nonzero_op.cu
*/
#include "np_nonzero_op-inl.h"
#include <hipcub/hipcub.hpp>
namespace mxnet {
namespace op {
struct PrefixSumInit {
template <typename DType>
MSHADOW_XINLINE static void Map(int i, int32_t* out, DType* in) {
if (in[i]) {
out[i] = 1;
} else {
out[i] = 0;
}
}
};
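// Worked example of how the flags feed the scan below: for input values
// [0, 7, 0, 3] the init kernel writes flags [0, 1, 0, 1]; the inclusive
// prefix sum gives [0, 1, 1, 2], so valid_num = prefix_sum[last] = 2 and the
// forward kernel can emit a coordinate wherever prefix_sum increases
// (indices 1 and 3 here).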
#define MAXDIM 5
void NonzeroForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
const NDArray& in = inputs[0];
const NDArray& out = outputs[0];
  CHECK_LE(in.shape().ndim(), MAXDIM) << "ndim of input cannot be larger than " << MAXDIM;
size_t in_size = in.shape().Size();
// 0-shape
if (0 == in_size) {
mxnet::TShape s(2, in.shape().ndim());
s[0] = 0;
const_cast<NDArray&>(out).Init(s);
return;
}
int32_t valid_num = 0;
Stream<gpu>* stream = ctx.get_stream<gpu>();
hipStream_t cuda_stream = Stream<gpu>::GetStream(stream);
int32_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// Calculate total temporary memory size
hipcub::DeviceScan::InclusiveSum(
d_temp_storage, temp_storage_bytes, prefix_sum, prefix_sum, in_size, cuda_stream);
size_t buffer_size = in_size * sizeof(int32_t);
temp_storage_bytes += buffer_size;
// Allocate memory on GPU and allocate pointer
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(temp_storage_bytes), stream);
prefix_sum = reinterpret_cast<int32_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + buffer_size;
MSHADOW_TYPE_SWITCH_WITH_BOOL(in.dtype(), DType, {
mxnet_op::Kernel<PrefixSumInit, gpu>::Launch(
stream, in_size, prefix_sum, in.data().dptr<DType>());
});
// Calculate prefix sum
hipcub::DeviceScan::InclusiveSum(
d_temp_storage, temp_storage_bytes, prefix_sum, prefix_sum, in_size, cuda_stream);
CUDA_CALL(hipMemcpyAsync(
&valid_num, &prefix_sum[in_size - 1], sizeof(int32_t), hipMemcpyDeviceToHost, cuda_stream));
CUDA_CALL(hipStreamSynchronize(cuda_stream));
// 0-dim
if (0 == in.shape().ndim()) {
mxnet::TShape s(2, 1);
if (valid_num) {
const_cast<NDArray&>(out).Init(s);
int64_t temp = 0;
CUDA_CALL(hipMemcpyAsync(
out.data().dptr<int64_t>(), &temp, sizeof(int64_t), hipMemcpyHostToDevice, cuda_stream));
} else {
s[0] = 0;
const_cast<NDArray&>(out).Init(s);
}
return;
}
// Set the output shape forcefully
mxnet::TShape s(2, in.shape().ndim());
s[0] = valid_num;
const_cast<NDArray&>(out).Init(s);
// get the shape from the input
MXNET_NDIM_SWITCH(in.shape().ndim(), ndim, {
mshadow::Shape<ndim> shape = in.shape().get<ndim>();
mxnet_op::Kernel<NonzeroForwardKernelGPU, gpu>::Launch(
stream, in_size, out.data().dptr<int64_t>(), prefix_sum, shape);
})
}
NNVM_REGISTER_OP(_npx_nonzero)
.set_attr<FResourceRequest>("FResourceRequest",
[](const NodeAttrs& attrs) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr<FComputeEx>("FComputeEx<gpu>", NonzeroForwardGPU);
} // namespace op
} // namespace mxnet
| db3d2fecbc3f53a09585768deb83247d650fb598.cu | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file np_nonzero_op.cu
*/
#include "np_nonzero_op-inl.h"
#include <cub/cub.cuh>
namespace mxnet {
namespace op {
struct PrefixSumInit {
template <typename DType>
MSHADOW_XINLINE static void Map(int i, int32_t* out, DType* in) {
if (in[i]) {
out[i] = 1;
} else {
out[i] = 0;
}
}
};
#define MAXDIM 5
void NonzeroForwardGPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
using namespace mshadow;
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
const NDArray& in = inputs[0];
const NDArray& out = outputs[0];
  CHECK_LE(in.shape().ndim(), MAXDIM) << "ndim of input cannot be larger than " << MAXDIM;
size_t in_size = in.shape().Size();
// 0-shape
if (0 == in_size) {
mxnet::TShape s(2, in.shape().ndim());
s[0] = 0;
const_cast<NDArray&>(out).Init(s);
return;
}
int32_t valid_num = 0;
Stream<gpu>* stream = ctx.get_stream<gpu>();
cudaStream_t cuda_stream = Stream<gpu>::GetStream(stream);
int32_t* prefix_sum = nullptr;
void* d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
// Calculate total temporary memory size
cub::DeviceScan::InclusiveSum(
d_temp_storage, temp_storage_bytes, prefix_sum, prefix_sum, in_size, cuda_stream);
size_t buffer_size = in_size * sizeof(int32_t);
temp_storage_bytes += buffer_size;
// Allocate memory on GPU and allocate pointer
Tensor<gpu, 1, char> workspace =
ctx.requested[0].get_space_typed<gpu, 1, char>(Shape1(temp_storage_bytes), stream);
prefix_sum = reinterpret_cast<int32_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + buffer_size;
MSHADOW_TYPE_SWITCH_WITH_BOOL(in.dtype(), DType, {
mxnet_op::Kernel<PrefixSumInit, gpu>::Launch(
stream, in_size, prefix_sum, in.data().dptr<DType>());
});
// Calculate prefix sum
cub::DeviceScan::InclusiveSum(
d_temp_storage, temp_storage_bytes, prefix_sum, prefix_sum, in_size, cuda_stream);
CUDA_CALL(cudaMemcpyAsync(
&valid_num, &prefix_sum[in_size - 1], sizeof(int32_t), cudaMemcpyDeviceToHost, cuda_stream));
CUDA_CALL(cudaStreamSynchronize(cuda_stream));
// 0-dim
if (0 == in.shape().ndim()) {
mxnet::TShape s(2, 1);
if (valid_num) {
const_cast<NDArray&>(out).Init(s);
int64_t temp = 0;
CUDA_CALL(cudaMemcpyAsync(
out.data().dptr<int64_t>(), &temp, sizeof(int64_t), cudaMemcpyHostToDevice, cuda_stream));
} else {
s[0] = 0;
const_cast<NDArray&>(out).Init(s);
}
return;
}
// Set the output shape forcefully
mxnet::TShape s(2, in.shape().ndim());
s[0] = valid_num;
const_cast<NDArray&>(out).Init(s);
// get the shape from the input
MXNET_NDIM_SWITCH(in.shape().ndim(), ndim, {
mshadow::Shape<ndim> shape = in.shape().get<ndim>();
mxnet_op::Kernel<NonzeroForwardKernelGPU, gpu>::Launch(
stream, in_size, out.data().dptr<int64_t>(), prefix_sum, shape);
})
}
NNVM_REGISTER_OP(_npx_nonzero)
.set_attr<FResourceRequest>("FResourceRequest",
[](const NodeAttrs& attrs) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr<FComputeEx>("FComputeEx<gpu>", NonzeroForwardGPU);
} // namespace op
} // namespace mxnet
|
831d600dcd870b9efc57fad157d231465ec659b7.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of a Hopper gather+GEMM+scatter kernel fusion.
This example fuses gather before GEMM and scatter after GEMM into the same
    GEMM kernel. Gather and scatter operations are controlled by an index
    vector that selects rows or columns from the A, B, C or D matrices.
Gather/scatter operations are always performed along a strided dimension
in order to preserve vectorized loads/stores. Thus the index vector is
applied to rows of row-major matrices and columns of column-major matrices.
Note that the index vector must contain integers in range [0,X) where
X is one of (M,N,K), depending on selected gather dimension. The problem
shape given to the GEMM kernel must consist of matrix sizes AFTER gather
and BEFORE scatter operations are applied.
*/
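// Shape bookkeeping, taking --mode=n as a concrete case: the kernel is
// launched with the reduced problem shape (M, index_size, K), and logical
// column j of the reduced GEMM maps to column idx[j] of the original B, C
// and D tensors, so all tensors keep their original allocations and the
// original strides are passed alongside the reduced shape.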
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <algorithm>
#include <iostream>
#include <random>
#include <numeric>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
#include "gather_gemm.hpp"
#include "gather_kernel.cuh"
#include "scatter_epilogue.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cute;
namespace example {
// Command line options parsing
struct Options {
bool help = false;
cutlass::gemm::BatchedGemmCoord problem_size = {2048, 2048, 2048, 1};
int index_size = 1024;
int mode = 1; // N-mode gather/scatter by default
float alpha = 1.0f;
float beta = 1.0f;
bool reference_check = true;
int iterations = 20;
bool valid() const {
return problem_size.m() > 0
&& problem_size.n() > 0
&& problem_size.k() > 0
&& problem_size.batch() > 0
&& 0 <= mode && mode < 3
&& index_size <= problem_size.at(mode)
&& iterations > 0;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch_size", problem_size.batch());
cmd.get_cmd_line_argument("index_size", index_size);
char const modes[] = {'m', 'n', 'k'};
char mode_input = modes[mode];
cmd.get_cmd_line_argument("mode", mode_input);
mode = int(std::distance(std::begin(modes), std::find(std::begin(modes), std::end(modes), mode_input)));
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("check", reference_check, true);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out <<
"52_hopper_gather_scatter_fusion example\n"
"\n"
" This example uses the CUTLASS Library to fuse gather/scatter of input/output tensors with GEMM.\n"
" It validates and benchmarks the fused kernel against an unfused implementation that executes\n"
" gather+GEMM+scatter in sequence and writes intermediate (gathered) tensors to memory.\n"
" For the unfused implementation two GEMM kernels are considered: default one that uses the same\n"
" schedule and instruction set as the fused one, and an optimized one that utilizes advanced\n"
" features (such as TMA units) that cannot be used by the fused kernel due to hardware constraints."
"\n"
"Options:\n"
" --help If specified, displays this usage statement.\n"
" --m=<int> GEMM M dimension\n"
" --n=<int> GEMM N dimension\n"
" --k=<int> GEMM K dimension\n"
" --batch_size=<int> GEMM batch size\n"
" --index_size=<int> Size of N dimension gather/scatter index\n"
" --mode=<m,n,k> Gather mode (M, N, or K)\n"
" --alpha=<float> GEMM alpha parameter\n"
" --beta=<float> GEMM beta parameter\n"
" --iterations=<int> Number of profiling iterations to perform.\n"
"\n"
"Examples:\n"
"\n"
"$ ./examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion --m=1024 --n=2048 --k=1024 --mode=n --index_size=1024\n";
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class ElementA, class LayoutA, class GatherA,
class ElementB, class LayoutB, class GatherB,
class ElementC, class LayoutC, class GatherC,
class ElementD, class LayoutD, class ScatterD,
class ElementAccumulator, class ElementComputeEpilogue>
struct ExampleRunner
{
// Useful aliases
// Alias to for the epilogue type that supports gather/scatter
using Epilogue = cutlass::epilogue::collective::EpilogueGatherScatter<
cutlass::gemm::TagToStrideC_t<LayoutC>,
cutlass::gemm::TagToStrideC_t<LayoutD>,
cutlass::epilogue::thread::LinearCombination<
ElementD, 1,
ElementAccumulator, ElementComputeEpilogue,
cutlass::epilogue::thread::ScaleType::Default,
cutlass::FloatRoundStyle::round_to_nearest, ElementC
>,
cutlass::gemm::EpilogueDefault,
GatherC,
ScatterD
>;
// Alias to for the mainloop type
using Mainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCount<5>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;
using ProblemShape = Shape<int,int,int,int>;
using Kernel = cutlass::gemm::kernel::GemmGather<
ProblemShape,
Mainloop,
Epilogue,
GatherA,
GatherB
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<Kernel>;
using StrideA = typename Kernel::StrideA;
using StrideB = typename Kernel::StrideB;
using StrideC = typename Kernel::StrideC;
using StrideD = typename Kernel::StrideD;
static constexpr bool DoGatherA = not cutlass::platform::is_same<GatherA, NoGather>::value;
static constexpr bool DoGatherB = not cutlass::platform::is_same<GatherB, NoGather>::value;
static constexpr bool DoGatherC = not cutlass::platform::is_same<GatherC, NoGather>::value;
static constexpr bool DoScatterD = not cutlass::platform::is_same<ScatterD, NoGather>::value;
static constexpr bool GatherAonM = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::RowMajor>::value;
static constexpr bool GatherAonK = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonN = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonK = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConM = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConN = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::ColumnMajor>::value;
static constexpr bool ScatterDonM = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::RowMajor>::value;
static constexpr bool ScatterDonN = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherModeM = GatherAonM || GatherConM || ScatterDonM;
static constexpr bool GatherModeN = GatherBonN || GatherConN || ScatterDonN;
static constexpr bool GatherModeK = GatherAonK || GatherBonK;
static_assert( GatherModeM && !GatherModeN && !GatherModeK ||
!GatherModeM && GatherModeN && !GatherModeK ||
!GatherModeM && !GatherModeN && GatherModeK,
"Only one gather mode (M, N or K) is supported by example runner");
// Construct a reference (non-gather) GEMM kernel type
using MainloopRef = Mainloop;
using EpilogueRef = typename cutlass::epilogue::collective::DefaultEpilogue<
StrideC, StrideD,
typename Epilogue::ThreadEpilogueOp,
typename Epilogue::EpilogueSchedule
>;
using KernelRef = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopRef,
EpilogueRef
>;
using GemmRef = cutlass::gemm::device::GemmUniversalAdapter<KernelRef>;
// Construct an optimized reference GEMM kernel type (using TMA)
using EpilogueOpt = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementComputeEpilogue,
ElementC, LayoutC, 128 / cutlass::sizeof_bits<ElementC>::value,
ElementD, LayoutD, 128 / cutlass::sizeof_bits<ElementD>::value,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using MainloopOpt = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOpt::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using KernelOpt = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopOpt,
EpilogueOpt
>;
using GemmOpt = cutlass::gemm::device::GemmUniversalAdapter<KernelOpt>;
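// The three kernels above differ only in schedule: the fused Gemm keeps the
// non-TMA multistage mainloop (KernelMultistage, 1x1x1 cluster) because the
// gather indirection breaks the contiguity TMA relies on; GemmOpt lets the
// collective builder pick a TMA schedule with a 2x2x1 cluster; and GemmRef
// reuses the fused kernel's mainloop so that the benchmark isolates the cost
// of the extra gather/scatter round-trips through global memory.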
// Data members
cutlass::gemm::BatchedGemmCoord problem_size_orig;
cutlass::gemm::BatchedGemmCoord problem_size;
ProblemShape problem_shape_orig;
ProblemShape problem_shape;
cutlass::KernelHardwareInfo hw_info;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
StrideA stride_A_orig;
StrideB stride_B_orig;
StrideC stride_C_orig;
StrideD stride_D_orig;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
cutlass::device_memory::allocation<ElementA> tensor_a;
cutlass::device_memory::allocation<ElementB> tensor_b;
cutlass::device_memory::allocation<ElementC> tensor_c;
cutlass::device_memory::allocation<ElementD> tensor_d;
cutlass::device_memory::allocation<int> gather_indices;
cutlass::device_memory::allocation<ElementA> tensor_a_gathered;
cutlass::device_memory::allocation<ElementB> tensor_b_gathered;
cutlass::device_memory::allocation<ElementC> tensor_c_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_reference;
cutlass::gemm::GemmUniversalMode gemm_mode;
Gemm gemm;
typename Gemm::Arguments arguments;
cutlass::device_memory::allocation<uint8_t> workspace;
GemmRef gemm_ref;
typename GemmRef::Arguments arguments_ref;
cutlass::device_memory::allocation<uint8_t> workspace_ref;
GemmOpt gemm_opt;
typename GemmOpt::Arguments arguments_opt;
cutlass::device_memory::allocation<uint8_t> workspace_opt;
ExampleRunner(Options const &options, cutlass::KernelHardwareInfo const &hw_info)
: problem_size_orig(options.problem_size),
problem_size(GatherModeM ? options.index_size : problem_size_orig.m(),
GatherModeN ? options.index_size : problem_size_orig.n(),
GatherModeK ? options.index_size : problem_size_orig.k(),
problem_size_orig.batch()),
problem_shape_orig(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()),
problem_shape(problem_size.m(), problem_size.n(), problem_size.k(), problem_size.batch()),
hw_info(hw_info),
alpha(options.alpha),
beta(options.beta),
stride_A_orig(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size_orig.m(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_B_orig(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_C_orig(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_D_orig(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_A(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size.m(), problem_size.k(), problem_size.batch()))),
stride_B(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size.n(), problem_size.k(), problem_size.batch()))),
stride_C(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
stride_D(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
tensor_a(problem_size_orig.m() * problem_size_orig.k() * problem_size_orig.batch()),
tensor_b(problem_size_orig.k() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_c(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_d(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gather_indices(options.index_size),
tensor_a_gathered(problem_size.m() * problem_size.k() * problem_size_orig.batch()),
tensor_b_gathered(problem_size.k() * problem_size.n() * problem_size_orig.batch()),
tensor_c_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_reference(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gemm_mode(problem_size.batch() > 1 ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm),
gemm(),
// When constructing arguments for gather/scatter gemm, we must pass stride arguments
// made for the original (non-gathered) problem size, because they are used to access
// tensors of the original shape. However we still use the reduced (gathered) problem
// shape since it corresponds to the logical indexing in reduced size GEMM.
arguments{
gemm_mode,
problem_shape,
{
tensor_a.get(),
stride_A_orig,
tensor_b.get(),
stride_B_orig
},
{
{ alpha, beta },
tensor_c.get(), stride_C_orig,
tensor_d.get(), stride_D_orig,
typename Epilogue::GatherC {gather_indices.get()},
typename Epilogue::ScatterD{gather_indices.get()}
},
hw_info,
typename Kernel::GatherA{gather_indices.get()},
typename Kernel::GatherB{gather_indices.get()}
},
workspace(Gemm::get_workspace_size(arguments)),
gemm_ref(),
arguments_ref{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_ref(GemmRef::get_workspace_size(arguments_ref)),
gemm_opt(),
arguments_opt{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_opt(GemmOpt::get_workspace_size(arguments_opt))
{
    // Fill input and output matrices on device using CUTLASS helper functions
cutlass::reference::device::BlockFillRandomUniform(tensor_a.get(), tensor_a.size(), 1, ElementA(7), ElementA(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_b.get(), tensor_b.size(), 1, ElementB(7), ElementB(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_c.get(), tensor_c.size(), 1, ElementC(7), ElementC(-8), 0);
cutlass::reference::device::BlockFillSequential(tensor_d.get(), tensor_d.size(), ElementD(0), ElementD(0));
    // Fill gather_indices with unique random integers in range [0,n)
int index_range = GatherModeM ? problem_size_orig.m() : (GatherModeN ? problem_size_orig.n() : problem_size_orig.k());
std::vector<int> indices(index_range);
std::iota(indices.begin(), indices.end(), 0);
{ // std::random_shuffle was deprecated in C++14 and removed in C++17
std::random_device make_seed;
std::mt19937 source_of_randomness(make_seed());
std::shuffle(indices.begin(), indices.end(), source_of_randomness);
}
gather_indices.copy_from_host(indices.data());
auto const gemm_init = [](auto & gemm, auto const & arguments, auto & workspace)
{
cutlass::Status status = gemm.can_implement(arguments);
CUTLASS_CHECK(status);
status = gemm.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
};
gemm_init(gemm, arguments, workspace );
gemm_init(gemm_ref, arguments_ref, workspace_ref);
gemm_init(gemm_opt, arguments_opt, workspace_opt);
}
void debug_output(std::ostream & os)
{
auto print_tensor = [](std::ostream &os, char const * name, auto const & data, auto shape, auto stride)
{
std::vector<remove_cvref_t<decltype(*data.get())>> h_data(data.size());
data.copy_to_host(h_data.data());
Tensor t = make_tensor(h_data.data(), shape, stride);
os << "\n" << name << ": " << std::setw(4) << t << std::endl;
};
{
auto [M,N,K,L] = problem_shape_orig;
print_tensor(os, "A", tensor_a, make_shape(M,K,L), stride_A_orig);
print_tensor(os, "B", tensor_b, make_shape(N,K,L), stride_B_orig);
print_tensor(os, "C", tensor_c, make_shape(M,N,L), stride_C_orig);
print_tensor(os, "D", tensor_d, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "D reference", tensor_d_reference, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "indices", gather_indices, make_shape(gather_indices.size()), make_stride(_1{}));
}
}
template<class Gemm2>
static void run_gemm(Gemm2 &gemm)
{
cutlass::Status status = gemm.run();
CUTLASS_CHECK(status);
}
template<class Gemm2>
void run_reference(Gemm2 &gemm)
{
// Convenience wrapper around calls to separate gather/scatter kernels
auto run_gather = [this](auto call, auto const & input, auto & output, auto gather_func, auto batch_size, auto stride)
{
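      // Locate the strided (non-unit) mode of the stride tuple at compile time;
      // get<I>(stride) below is the pitch between consecutive gathered rows/columns.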
[[maybe_unused]] auto idx = find_if(stride, [](auto x){ return not is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
call(input.get(),
output.get(),
gather_func,
batch_size,
static_cast<int>(input.size() / batch_size),
static_cast<int>(output.size() / batch_size),
static_cast<int>(get<I>(stride)),
hw_info);
};
// Forward calls via lambda to avoid specifying template arguments
auto gather_call = [](auto&&... args){ gather(static_cast<decltype(args)&&>(args)...); };
auto scatter_call = [](auto&&... args){ scatter(static_cast<decltype(args)&&>(args)...); };
if constexpr (DoGatherA) {
run_gather(gather_call, tensor_a, tensor_a_gathered, arguments.gather_A, problem_size.batch(), stride_A);
}
if constexpr (DoGatherB) {
run_gather(gather_call, tensor_b, tensor_b_gathered, arguments.gather_B, problem_size.batch(), stride_B);
}
if constexpr (DoGatherC) {
if (beta != ElementComputeEpilogue(0)) {
run_gather(gather_call, tensor_c, tensor_c_gathered, arguments.epilogue.gather_C, problem_size.batch(), stride_C);
}
}
run_gemm(gemm);
if constexpr (DoScatterD) {
run_gather(scatter_call, tensor_d_gathered, tensor_d_reference, arguments.epilogue.scatter_D, problem_size.batch(), stride_D);
}
}
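  // Runs the fused kernel, then the unfused reference path, and compares the
  // resulting D tensors element-wise on the device.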
bool verify()
{
run_gemm(gemm);
run_reference(gemm_ref);
hipDeviceSynchronize();
return cutlass::reference::device::BlockCompareEqual(tensor_d.get(), tensor_d_reference.get(), tensor_d.size());
}
bool run(Options const &options)
{
if (options.reference_check) {
if (!verify()) {
std::cout << "Failed validation" << std::endl;
#if 1
debug_output(std::cout);
#endif
return false;
}
else {
std::cout << "Passed validation" << std::endl;
}
}
//
// Run profiling loop
//
auto const benchmark = [&](auto name, auto func)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
func();
}
timer.stop();
double runtime = timer.elapsed_millis() / double(options.iterations);
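      // runtime is in ms, so flops / (1e6 * ms) == flops / (1e9 * s) == GFLOP/s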
double gflops = 2 * double(problem_size.product()) / 1e6 / runtime; // Two flops per multiply-add
std::cout << name << ":\n";
std::cout << " Runtime: " << runtime << " ms\n";
std::cout << " GFLOPs: " << gflops << "\n";
};
benchmark("Fused", [&](){ run_gemm(gemm); });
benchmark("Unfused default", [&](){ run_reference(gemm_ref); });
benchmark("Unfused optimized", [&](){ run_reference(gemm_opt); });
return true;
}
};
} // namespace example
int main(int argc, const char ** argv) {
bool notSupported = false;
// CUDA 12 minimum required
if (__CUDACC_VER_MAJOR__ < 12) {
std::cerr << "This example requires CUDA Toolkit version 12 or later.\n";
notSupported = true;
}
hipDeviceProp_t props;
CUDA_CHECK(hipGetDeviceProperties(&props, 0));
if (props.major < 9) {
std::cerr << "This example requires a device with compute capability 90 or higher.\n";
notSupported = true;
}
if (notSupported) {
return EXIT_SUCCESS; // Do not fail CI checks on unsupported systems
}
example::Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << "\n";
return EXIT_SUCCESS;
}
if (!options.valid()) {
std::cerr << "Invalid arguments." << "\n";
return EXIT_FAILURE;
}
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
bool result = true;
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
switch (options.mode) {
using namespace example;
case 0: {
std::cout << "Gather A,C + scatter D on M mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::ColumnMajor, NoGather, // B
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 1: {
std::cout << "Gather B,C + scatter D on N mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // A
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 2: {
std::cout << "Gather A,B on K mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // C
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
}
#endif
return result ? EXIT_SUCCESS : EXIT_FAILURE;
}
| 831d600dcd870b9efc57fad157d231465ec659b7.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*! \file
\brief Example of a Hopper gather+GEMM+scatter kernel fusion.
This example fuses gather before GEMM and scatter after GEMM into the same
    GEMM kernel. Gather and scatter operations are controlled by an index vector
to select rows or columns from A, B, C or D matrices.
Gather/scatter operations are always performed along a strided dimension
in order to preserve vectorized loads/stores. Thus the index vector is
applied to rows of row-major matrices and columns of column-major matrices.
Note that the index vector must contain integers in range [0,X) where
    X is one of (M,N,K), depending on the selected gather dimension. The problem
shape given to the GEMM kernel must consist of matrix sizes AFTER gather
and BEFORE scatter operations are applied.
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <random>
#include <numeric>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_universal.h"
#include "cutlass/gemm/device/gemm_universal_adapter.h"
#include "cutlass/gemm/collective/collective_builder.hpp"
#include "cutlass/epilogue/thread/linear_combination.h"
#include "cutlass/epilogue/collective/collective_builder.hpp"
#include "cutlass/epilogue/collective/default_epilogue.hpp"
#include "cutlass/util/command_line.h"
#include "cutlass/util/device_memory.h"
#include "cutlass/util/packed_stride.hpp"
#include "cutlass/util/reference/device/tensor_fill.h"
#include "cutlass/util/reference/device/tensor_compare.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
#include "gather_gemm.hpp"
#include "gather_kernel.cuh"
#include "scatter_epilogue.hpp"
/////////////////////////////////////////////////////////////////////////////////////////////////
using namespace cute;
namespace example {
// Command line options parsing
struct Options {
bool help = false;
cutlass::gemm::BatchedGemmCoord problem_size = {2048, 2048, 2048, 1};
int index_size = 1024;
int mode = 1; // N-mode gather/scatter by default
float alpha = 1.0f;
float beta = 1.0f;
bool reference_check = true;
int iterations = 20;
bool valid() const {
return problem_size.m() > 0
&& problem_size.n() > 0
&& problem_size.k() > 0
&& problem_size.batch() > 0
&& 0 <= mode && mode < 3
&& index_size <= problem_size.at(mode)
&& iterations > 0;
}
// Parses the command line
void parse(int argc, char const **args) {
cutlass::CommandLine cmd(argc, args);
if (cmd.check_cmd_line_flag("help")) {
help = true;
}
cmd.get_cmd_line_argument("m", problem_size.m());
cmd.get_cmd_line_argument("n", problem_size.n());
cmd.get_cmd_line_argument("k", problem_size.k());
cmd.get_cmd_line_argument("batch_size", problem_size.batch());
cmd.get_cmd_line_argument("index_size", index_size);
char const modes[] = {'m', 'n', 'k'};
char mode_input = modes[mode];
cmd.get_cmd_line_argument("mode", mode_input);
mode = int(std::distance(std::begin(modes), std::find(std::begin(modes), std::end(modes), mode_input)));
cmd.get_cmd_line_argument("alpha", alpha);
cmd.get_cmd_line_argument("beta", beta);
cmd.get_cmd_line_argument("check", reference_check, true);
cmd.get_cmd_line_argument("iterations", iterations);
}
/// Prints the usage statement.
std::ostream & print_usage(std::ostream &out) const {
out <<
"52_hopper_gather_scatter_fusion example\n"
"\n"
" This example uses the CUTLASS Library to fuse gather/scatter of input/output tensors with GEMM.\n"
" It validates and benchmarks the fused kernel against an unfused implementation that executes\n"
" gather+GEMM+scatter in sequence and writes intermediate (gathered) tensors to memory.\n"
" For the unfused implementation two GEMM kernels are considered: default one that uses the same\n"
" schedule and instruction set as the fused one, and an optimized one that utilizes advanced\n"
" features (such as TMA units) that cannot be used by the fused kernel due to hardware constraints."
"\n"
"Options:\n"
" --help If specified, displays this usage statement.\n"
" --m=<int> GEMM M dimension\n"
" --n=<int> GEMM N dimension\n"
" --k=<int> GEMM K dimension\n"
" --batch_size=<int> GEMM batch size\n"
" --index_size=<int> Size of N dimension gather/scatter index\n"
" --mode=<m,n,k> Gather mode (M, N, or K)\n"
" --alpha=<float> GEMM alpha parameter\n"
" --beta=<float> GEMM beta parameter\n"
" --iterations=<int> Number of profiling iterations to perform.\n"
"\n"
"Examples:\n"
"\n"
"$ ./examples/52_hopper_gather_scatter_fusion/52_hopper_gather_scatter_fusion --m=1024 --n=2048 --k=1024 --mode=n --index_size=1024\n";
return out;
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////
template<class ElementA, class LayoutA, class GatherA,
class ElementB, class LayoutB, class GatherB,
class ElementC, class LayoutC, class GatherC,
class ElementD, class LayoutD, class ScatterD,
class ElementAccumulator, class ElementComputeEpilogue>
struct ExampleRunner
{
// Useful aliases
// Alias to for the epilogue type that supports gather/scatter
using Epilogue = cutlass::epilogue::collective::EpilogueGatherScatter<
cutlass::gemm::TagToStrideC_t<LayoutC>,
cutlass::gemm::TagToStrideC_t<LayoutD>,
cutlass::epilogue::thread::LinearCombination<
ElementD, 1,
ElementAccumulator, ElementComputeEpilogue,
cutlass::epilogue::thread::ScaleType::Default,
cutlass::FloatRoundStyle::round_to_nearest, ElementC
>,
cutlass::gemm::EpilogueDefault,
GatherC,
ScatterD
>;
// Alias to for the mainloop type
using Mainloop = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_1,_1,_1>,
cutlass::gemm::collective::StageCount<5>,
cutlass::gemm::KernelMultistage
>::CollectiveOp;
using ProblemShape = Shape<int,int,int,int>;
using Kernel = cutlass::gemm::kernel::GemmGather<
ProblemShape,
Mainloop,
Epilogue,
GatherA,
GatherB
>;
using Gemm = cutlass::gemm::device::GemmUniversalAdapter<Kernel>;
using StrideA = typename Kernel::StrideA;
using StrideB = typename Kernel::StrideB;
using StrideC = typename Kernel::StrideC;
using StrideD = typename Kernel::StrideD;
static constexpr bool DoGatherA = not cutlass::platform::is_same<GatherA, NoGather>::value;
static constexpr bool DoGatherB = not cutlass::platform::is_same<GatherB, NoGather>::value;
static constexpr bool DoGatherC = not cutlass::platform::is_same<GatherC, NoGather>::value;
static constexpr bool DoScatterD = not cutlass::platform::is_same<ScatterD, NoGather>::value;
static constexpr bool GatherAonM = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::RowMajor>::value;
static constexpr bool GatherAonK = DoGatherA && cutlass::platform::is_same<LayoutA,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonN = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherBonK = DoGatherB && cutlass::platform::is_same<LayoutB,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConM = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::RowMajor>::value;
static constexpr bool GatherConN = DoGatherC && cutlass::platform::is_same<LayoutC,cutlass::layout::ColumnMajor>::value;
static constexpr bool ScatterDonM = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::RowMajor>::value;
static constexpr bool ScatterDonN = DoScatterD && cutlass::platform::is_same<LayoutD,cutlass::layout::ColumnMajor>::value;
static constexpr bool GatherModeM = GatherAonM || GatherConM || ScatterDonM;
static constexpr bool GatherModeN = GatherBonN || GatherConN || ScatterDonN;
static constexpr bool GatherModeK = GatherAonK || GatherBonK;
static_assert( GatherModeM && !GatherModeN && !GatherModeK ||
!GatherModeM && GatherModeN && !GatherModeK ||
!GatherModeM && !GatherModeN && GatherModeK,
"Only one gather mode (M, N or K) is supported by example runner");
// Construct a reference (non-gather) GEMM kernel type
using MainloopRef = Mainloop;
using EpilogueRef = typename cutlass::epilogue::collective::DefaultEpilogue<
StrideC, StrideD,
typename Epilogue::ThreadEpilogueOp,
typename Epilogue::EpilogueSchedule
>;
using KernelRef = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopRef,
EpilogueRef
>;
using GemmRef = cutlass::gemm::device::GemmUniversalAdapter<KernelRef>;
// Construct an optimized reference GEMM kernel type (using TMA)
using EpilogueOpt = typename cutlass::epilogue::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::epilogue::collective::EpilogueTileAuto,
ElementAccumulator, ElementComputeEpilogue,
ElementC, LayoutC, 128 / cutlass::sizeof_bits<ElementC>::value,
ElementD, LayoutD, 128 / cutlass::sizeof_bits<ElementD>::value,
cutlass::epilogue::collective::EpilogueScheduleAuto
>::CollectiveOp;
using MainloopOpt = typename cutlass::gemm::collective::CollectiveBuilder<
cutlass::arch::Sm90, cutlass::arch::OpClassTensorOp,
ElementA, LayoutA, 128 / cutlass::sizeof_bits<ElementA>::value,
ElementB, LayoutB, 128 / cutlass::sizeof_bits<ElementB>::value,
ElementAccumulator,
Shape<_128,_128,_64>,
Shape<_2,_2,_1>,
cutlass::gemm::collective::StageCountAutoCarveout<sizeof(typename EpilogueOpt::SharedStorage)>,
cutlass::gemm::collective::KernelScheduleAuto
>::CollectiveOp;
using KernelOpt = cutlass::gemm::kernel::GemmUniversal<
ProblemShape,
MainloopOpt,
EpilogueOpt
>;
using GemmOpt = cutlass::gemm::device::GemmUniversalAdapter<KernelOpt>;
// Data members
cutlass::gemm::BatchedGemmCoord problem_size_orig;
cutlass::gemm::BatchedGemmCoord problem_size;
ProblemShape problem_shape_orig;
ProblemShape problem_shape;
cutlass::KernelHardwareInfo hw_info;
ElementComputeEpilogue alpha;
ElementComputeEpilogue beta;
StrideA stride_A_orig;
StrideB stride_B_orig;
StrideC stride_C_orig;
StrideD stride_D_orig;
StrideA stride_A;
StrideB stride_B;
StrideC stride_C;
StrideD stride_D;
cutlass::device_memory::allocation<ElementA> tensor_a;
cutlass::device_memory::allocation<ElementB> tensor_b;
cutlass::device_memory::allocation<ElementC> tensor_c;
cutlass::device_memory::allocation<ElementD> tensor_d;
cutlass::device_memory::allocation<int> gather_indices;
cutlass::device_memory::allocation<ElementA> tensor_a_gathered;
cutlass::device_memory::allocation<ElementB> tensor_b_gathered;
cutlass::device_memory::allocation<ElementC> tensor_c_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_gathered;
cutlass::device_memory::allocation<ElementD> tensor_d_reference;
cutlass::gemm::GemmUniversalMode gemm_mode;
Gemm gemm;
typename Gemm::Arguments arguments;
cutlass::device_memory::allocation<uint8_t> workspace;
GemmRef gemm_ref;
typename GemmRef::Arguments arguments_ref;
cutlass::device_memory::allocation<uint8_t> workspace_ref;
GemmOpt gemm_opt;
typename GemmOpt::Arguments arguments_opt;
cutlass::device_memory::allocation<uint8_t> workspace_opt;
ExampleRunner(Options const &options, cutlass::KernelHardwareInfo const &hw_info)
: problem_size_orig(options.problem_size),
problem_size(GatherModeM ? options.index_size : problem_size_orig.m(),
GatherModeN ? options.index_size : problem_size_orig.n(),
GatherModeK ? options.index_size : problem_size_orig.k(),
problem_size_orig.batch()),
problem_shape_orig(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()),
problem_shape(problem_size.m(), problem_size.n(), problem_size.k(), problem_size.batch()),
hw_info(hw_info),
alpha(options.alpha),
beta(options.beta),
stride_A_orig(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size_orig.m(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_B_orig(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size_orig.n(), problem_size_orig.k(), problem_size_orig.batch()))),
stride_C_orig(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_D_orig(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size_orig.m(), problem_size_orig.n(), problem_size_orig.batch()))),
stride_A(cutlass::make_cute_packed_stride(
StrideA{}, make_shape(problem_size.m(), problem_size.k(), problem_size.batch()))),
stride_B(cutlass::make_cute_packed_stride(
StrideB{}, make_shape(problem_size.n(), problem_size.k(), problem_size.batch()))),
stride_C(cutlass::make_cute_packed_stride(
StrideC{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
stride_D(cutlass::make_cute_packed_stride(
StrideD{}, make_shape(problem_size.m(), problem_size.n(), problem_size.batch()))),
tensor_a(problem_size_orig.m() * problem_size_orig.k() * problem_size_orig.batch()),
tensor_b(problem_size_orig.k() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_c(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
tensor_d(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gather_indices(options.index_size),
tensor_a_gathered(problem_size.m() * problem_size.k() * problem_size_orig.batch()),
tensor_b_gathered(problem_size.k() * problem_size.n() * problem_size_orig.batch()),
tensor_c_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_gathered(problem_size.m() * problem_size.n() * problem_size_orig.batch()),
tensor_d_reference(problem_size_orig.m() * problem_size_orig.n() * problem_size_orig.batch()),
gemm_mode(problem_size.batch() > 1 ? cutlass::gemm::GemmUniversalMode::kBatched : cutlass::gemm::GemmUniversalMode::kGemm),
gemm(),
// When constructing arguments for gather/scatter gemm, we must pass stride arguments
// made for the original (non-gathered) problem size, because they are used to access
// tensors of the original shape. However we still use the reduced (gathered) problem
// shape since it corresponds to the logical indexing in reduced size GEMM.
arguments{
gemm_mode,
problem_shape,
{
tensor_a.get(),
stride_A_orig,
tensor_b.get(),
stride_B_orig
},
{
{ alpha, beta },
tensor_c.get(), stride_C_orig,
tensor_d.get(), stride_D_orig,
typename Epilogue::GatherC {gather_indices.get()},
typename Epilogue::ScatterD{gather_indices.get()}
},
hw_info,
typename Kernel::GatherA{gather_indices.get()},
typename Kernel::GatherB{gather_indices.get()}
},
workspace(Gemm::get_workspace_size(arguments)),
gemm_ref(),
arguments_ref{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_ref(GemmRef::get_workspace_size(arguments_ref)),
gemm_opt(),
arguments_opt{
gemm_mode,
problem_shape,
{
DoGatherA ? tensor_a_gathered.get() : tensor_a.get(),
stride_A,
DoGatherB ? tensor_b_gathered.get() : tensor_b.get(),
stride_B
},
{
{ alpha, beta },
DoGatherC ? tensor_c_gathered.get() : tensor_c.get(),
stride_C,
DoScatterD ? tensor_d_gathered.get() : tensor_d_reference.get(),
stride_D
},
hw_info
},
workspace_opt(GemmOpt::get_workspace_size(arguments_opt))
{
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::device::BlockFillRandomUniform(tensor_a.get(), tensor_a.size(), 1, ElementA(7), ElementA(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_b.get(), tensor_b.size(), 1, ElementB(7), ElementB(-8), 0);
cutlass::reference::device::BlockFillRandomUniform(tensor_c.get(), tensor_c.size(), 1, ElementC(7), ElementC(-8), 0);
cutlass::reference::device::BlockFillSequential(tensor_d.get(), tensor_d.size(), ElementD(0), ElementD(0));
    // Fill gather_indices with unique random integers in range [0,n)
int index_range = GatherModeM ? problem_size_orig.m() : (GatherModeN ? problem_size_orig.n() : problem_size_orig.k());
std::vector<int> indices(index_range);
std::iota(indices.begin(), indices.end(), 0);
{ // std::random_shuffle was deprecated in C++14 and removed in C++17
std::random_device make_seed;
std::mt19937 source_of_randomness(make_seed());
std::shuffle(indices.begin(), indices.end(), source_of_randomness);
}
gather_indices.copy_from_host(indices.data());
auto const gemm_init = [](auto & gemm, auto const & arguments, auto & workspace)
{
cutlass::Status status = gemm.can_implement(arguments);
CUTLASS_CHECK(status);
status = gemm.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
};
gemm_init(gemm, arguments, workspace );
gemm_init(gemm_ref, arguments_ref, workspace_ref);
gemm_init(gemm_opt, arguments_opt, workspace_opt);
}
void debug_output(std::ostream & os)
{
auto print_tensor = [](std::ostream &os, char const * name, auto const & data, auto shape, auto stride)
{
std::vector<remove_cvref_t<decltype(*data.get())>> h_data(data.size());
data.copy_to_host(h_data.data());
Tensor t = make_tensor(h_data.data(), shape, stride);
os << "\n" << name << ": " << std::setw(4) << t << std::endl;
};
{
auto [M,N,K,L] = problem_shape_orig;
print_tensor(os, "A", tensor_a, make_shape(M,K,L), stride_A_orig);
print_tensor(os, "B", tensor_b, make_shape(N,K,L), stride_B_orig);
print_tensor(os, "C", tensor_c, make_shape(M,N,L), stride_C_orig);
print_tensor(os, "D", tensor_d, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "D reference", tensor_d_reference, make_shape(M,N,L), stride_D_orig);
print_tensor(os, "indices", gather_indices, make_shape(gather_indices.size()), make_stride(_1{}));
}
}
template<class Gemm2>
static void run_gemm(Gemm2 &gemm)
{
cutlass::Status status = gemm.run();
CUTLASS_CHECK(status);
}
template<class Gemm2>
void run_reference(Gemm2 &gemm)
{
// Convenience wrapper around calls to separate gather/scatter kernels
auto run_gather = [this](auto call, auto const & input, auto & output, auto gather_func, auto batch_size, auto stride)
{
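      // Locate the strided (non-unit) mode of the stride tuple at compile time;
      // get<I>(stride) below is the pitch between consecutive gathered rows/columns.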
[[maybe_unused]] auto idx = find_if(stride, [](auto x){ return not is_constant<1, decltype(x)>{}; });
constexpr int I = decltype(idx)::value;
call(input.get(),
output.get(),
gather_func,
batch_size,
static_cast<int>(input.size() / batch_size),
static_cast<int>(output.size() / batch_size),
static_cast<int>(get<I>(stride)),
hw_info);
};
// Forward calls via lambda to avoid specifying template arguments
auto gather_call = [](auto&&... args){ gather(static_cast<decltype(args)&&>(args)...); };
auto scatter_call = [](auto&&... args){ scatter(static_cast<decltype(args)&&>(args)...); };
if constexpr (DoGatherA) {
run_gather(gather_call, tensor_a, tensor_a_gathered, arguments.gather_A, problem_size.batch(), stride_A);
}
if constexpr (DoGatherB) {
run_gather(gather_call, tensor_b, tensor_b_gathered, arguments.gather_B, problem_size.batch(), stride_B);
}
if constexpr (DoGatherC) {
if (beta != ElementComputeEpilogue(0)) {
run_gather(gather_call, tensor_c, tensor_c_gathered, arguments.epilogue.gather_C, problem_size.batch(), stride_C);
}
}
run_gemm(gemm);
if constexpr (DoScatterD) {
run_gather(scatter_call, tensor_d_gathered, tensor_d_reference, arguments.epilogue.scatter_D, problem_size.batch(), stride_D);
}
}
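  // Runs the fused kernel, then the unfused reference path, and compares the
  // resulting D tensors element-wise on the device.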
bool verify()
{
run_gemm(gemm);
run_reference(gemm_ref);
cudaDeviceSynchronize();
return cutlass::reference::device::BlockCompareEqual(tensor_d.get(), tensor_d_reference.get(), tensor_d.size());
}
bool run(Options const &options)
{
if (options.reference_check) {
if (!verify()) {
std::cout << "Failed validation" << std::endl;
#if 1
debug_output(std::cout);
#endif
return false;
}
else {
std::cout << "Passed validation" << std::endl;
}
}
//
// Run profiling loop
//
auto const benchmark = [&](auto name, auto func)
{
GpuTimer timer;
timer.start();
for (int iter = 0; iter < options.iterations; ++iter) {
func();
}
timer.stop();
double runtime = timer.elapsed_millis() / double(options.iterations);
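      // runtime is in ms, so flops / (1e6 * ms) == flops / (1e9 * s) == GFLOP/s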
double gflops = 2 * double(problem_size.product()) / 1e6 / runtime; // Two flops per multiply-add
std::cout << name << ":\n";
std::cout << " Runtime: " << runtime << " ms\n";
std::cout << " GFLOPs: " << gflops << "\n";
};
benchmark("Fused", [&](){ run_gemm(gemm); });
benchmark("Unfused default", [&](){ run_reference(gemm_ref); });
benchmark("Unfused optimized", [&](){ run_reference(gemm_opt); });
return true;
}
};
} // namespace example
int main(int argc, const char ** argv) {
bool notSupported = false;
// CUDA 12 minimum required
if (__CUDACC_VER_MAJOR__ < 12) {
std::cerr << "This example requires CUDA Toolkit version 12 or later.\n";
notSupported = true;
}
cudaDeviceProp props;
CUDA_CHECK(cudaGetDeviceProperties(&props, 0));
if (props.major < 9) {
std::cerr << "This example requires a device with compute capability 90 or higher.\n";
notSupported = true;
}
if (notSupported) {
return EXIT_SUCCESS; // Do not fail CI checks on unsupported systems
}
example::Options options;
options.parse(argc, argv);
if (options.help) {
options.print_usage(std::cout) << "\n";
return EXIT_SUCCESS;
}
if (!options.valid()) {
std::cerr << "Invalid arguments." << "\n";
return EXIT_FAILURE;
}
cutlass::KernelHardwareInfo hw_info;
hw_info.device_id = 0;
hw_info.sm_count = cutlass::KernelHardwareInfo::query_device_multiprocessor_count(hw_info.device_id);
bool result = true;
#if defined(CUTLASS_ARCH_MMA_SM90_SUPPORTED)
switch (options.mode) {
using namespace example;
case 0: {
std::cout << "Gather A,C + scatter D on M mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::ColumnMajor, NoGather, // B
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 1: {
std::cout << "Gather B,C + scatter D on N mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // A
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // C
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
case 2: {
std::cout << "Gather A,B on K mode:" << std::endl;
using Runner = ExampleRunner<
cutlass::half_t, cutlass::layout::ColumnMajor, IndexedGather<int>, // A
cutlass::half_t, cutlass::layout::RowMajor, IndexedGather<int>, // B
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // C
cutlass::half_t, cutlass::layout::RowMajor, NoGather, // D
float, float>;
result &= Runner(options, hw_info).run(options);
break;
}
}
#endif
return result ? EXIT_SUCCESS : EXIT_FAILURE;
}
|
3f75b6fe500c4615fd0525f79e93e2939b194d1b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <torch/script.h>
// clang-format off
// TODO: make spconv_utils.h order agnostic
#include "../spconv_utils.h"
// clang-format on
#include <utils/spconv/spconv/indice.h>
#include <utils/spconv/spconv/reordering.h>
#include "pytorch_cuda_helper.hpp"
torch::Tensor FusedIndiceConvBatchnormCUDAKernelLauncher(
torch::Tensor features, torch::Tensor filters, torch::Tensor bias,
torch::Tensor indicePairs, torch::Tensor indiceNum, int64_t numActOut,
int64_t _inverse, int64_t _subM) {
at::hip::HIPGuardMasqueradingAsCUDA device_guard(features.device());
bool subM = _subM != 0;
bool inverse = _inverse != 0;
auto device = features.device().type();
auto ndim = filters.dim() - 2;
auto kernelVolume = indicePairs.size(0);
auto numInPlanes = features.size(1);
auto numOutPlanes = filters.size(ndim + 1);
auto indicePairNumCpu = indiceNum.to({torch::kCPU});
auto indicePairMaxSizeIter =
std::max_element(indicePairNumCpu.data_ptr<int>(),
indicePairNumCpu.data_ptr<int>() + kernelVolume);
int indicePairMaxOffset =
indicePairMaxSizeIter - indicePairNumCpu.data_ptr<int>();
int indicePairMaxSize = *indicePairMaxSizeIter;
auto options =
torch::TensorOptions().dtype(features.dtype()).device(features.device());
torch::Tensor output =
torch::zeros({numActOut, numOutPlanes}, options).copy_(bias);
torch::Tensor inputBuffer =
torch::zeros({indicePairMaxSize, numInPlanes}, options);
torch::Tensor outputBuffer =
torch::zeros({indicePairMaxSize, numOutPlanes}, options);
filters = filters.view({-1, numInPlanes, numOutPlanes});
  if (subM) {  // the center index of a subM conv doesn't need gather and
               // scatter-add
torch::mm_out(output, features, filters[indicePairMaxOffset]);
}
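  // For each kernel offset: gather the active input rows, multiply them by that
  // offset's filter slice, and scatter-add the partial products into the output.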
for (int i = 0; i < kernelVolume; ++i) {
auto nHot = indicePairNumCpu.data_ptr<int>()[i];
if (nHot <= 0 || (subM && i == indicePairMaxOffset)) {
continue;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
features.scalar_type(), "FusedIndiceConvBatchnormKernel", [&] {
auto outputBufferBlob = torch::from_blob(
outputBuffer.data_ptr<scalar_t>(), {nHot, numOutPlanes}, options);
auto inputBufferBlob = torch::from_blob(
inputBuffer.data_ptr<scalar_t>(), {nHot, numInPlanes}, options);
if (device == torch::kCPU) {
functor::SparseGatherFunctor<tv::CPU, scalar_t, int> gatherFtor;
gatherFtor(tv::CPU(), tv::torch2tv<scalar_t>(inputBuffer),
tv::torch2tv<const scalar_t>(features),
tv::torch2tv<const int>(indicePairs).subview(i, inverse),
nHot);
} else {
functor::SparseGatherFunctor<tv::TorchGPU, scalar_t, int>
gatherFtor;
gatherFtor(tv::TorchGPU(), tv::torch2tv<scalar_t>(inputBuffer),
tv::torch2tv<const scalar_t>(features),
tv::torch2tv<const int>(indicePairs).subview(i, inverse),
nHot);
TV_CHECK_CUDA_ERR();
          /* slower than SparseGatherFunctor, possibly due to the int->long conversion
auto indicePairLong = indicePairs[i][inverse].to(torch::kInt64);
auto indicePairBlob =
torch::from_blob(indicePairLong.data_ptr<long>(), {nHot},
indicePairOptions); torch::index_select_out(inputBufferBlob,
features, 0, indicePairBlob);*/
}
torch::mm_out(outputBufferBlob, inputBufferBlob, filters[i]);
if (device == torch::kCPU) {
functor::SparseScatterAddFunctor<tv::CPU, scalar_t, int>
scatterFtor;
scatterFtor(
tv::CPU(), tv::torch2tv<scalar_t>(output),
tv::torch2tv<const scalar_t>(outputBuffer),
tv::torch2tv<const int>(indicePairs).subview(i, !inverse), nHot,
true);
} else {
functor::SparseScatterAddFunctor<tv::TorchGPU, scalar_t, int>
scatterFtor;
scatterFtor(
tv::TorchGPU(), tv::torch2tv<scalar_t>(output),
tv::torch2tv<const scalar_t>(outputBuffer),
tv::torch2tv<const int>(indicePairs).subview(i, !inverse), nHot,
true);
TV_CHECK_CUDA_ERR();
}
});
}
return output;
}
| 3f75b6fe500c4615fd0525f79e93e2939b194d1b.cu | #include <cuda_runtime_api.h>
#include <torch/script.h>
// clang-format off
// TODO: make spconv_utils.h order agnostic
#include "../spconv_utils.h"
// clang-format on
#include <utils/spconv/spconv/indice.h>
#include <utils/spconv/spconv/reordering.h>
#include "pytorch_cuda_helper.hpp"
torch::Tensor FusedIndiceConvBatchnormCUDAKernelLauncher(
torch::Tensor features, torch::Tensor filters, torch::Tensor bias,
torch::Tensor indicePairs, torch::Tensor indiceNum, int64_t numActOut,
int64_t _inverse, int64_t _subM) {
at::cuda::CUDAGuard device_guard(features.device());
bool subM = _subM != 0;
bool inverse = _inverse != 0;
auto device = features.device().type();
auto ndim = filters.dim() - 2;
auto kernelVolume = indicePairs.size(0);
auto numInPlanes = features.size(1);
auto numOutPlanes = filters.size(ndim + 1);
auto indicePairNumCpu = indiceNum.to({torch::kCPU});
auto indicePairMaxSizeIter =
std::max_element(indicePairNumCpu.data_ptr<int>(),
indicePairNumCpu.data_ptr<int>() + kernelVolume);
int indicePairMaxOffset =
indicePairMaxSizeIter - indicePairNumCpu.data_ptr<int>();
int indicePairMaxSize = *indicePairMaxSizeIter;
auto options =
torch::TensorOptions().dtype(features.dtype()).device(features.device());
torch::Tensor output =
torch::zeros({numActOut, numOutPlanes}, options).copy_(bias);
torch::Tensor inputBuffer =
torch::zeros({indicePairMaxSize, numInPlanes}, options);
torch::Tensor outputBuffer =
torch::zeros({indicePairMaxSize, numOutPlanes}, options);
filters = filters.view({-1, numInPlanes, numOutPlanes});
  if (subM) {  // the center index of a subM conv doesn't need gather and
               // scatter-add
torch::mm_out(output, features, filters[indicePairMaxOffset]);
}
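  // For each kernel offset: gather the active input rows, multiply them by that
  // offset's filter slice, and scatter-add the partial products into the output.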
for (int i = 0; i < kernelVolume; ++i) {
auto nHot = indicePairNumCpu.data_ptr<int>()[i];
if (nHot <= 0 || (subM && i == indicePairMaxOffset)) {
continue;
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
features.scalar_type(), "FusedIndiceConvBatchnormKernel", [&] {
auto outputBufferBlob = torch::from_blob(
outputBuffer.data_ptr<scalar_t>(), {nHot, numOutPlanes}, options);
auto inputBufferBlob = torch::from_blob(
inputBuffer.data_ptr<scalar_t>(), {nHot, numInPlanes}, options);
if (device == torch::kCPU) {
functor::SparseGatherFunctor<tv::CPU, scalar_t, int> gatherFtor;
gatherFtor(tv::CPU(), tv::torch2tv<scalar_t>(inputBuffer),
tv::torch2tv<const scalar_t>(features),
tv::torch2tv<const int>(indicePairs).subview(i, inverse),
nHot);
} else {
functor::SparseGatherFunctor<tv::TorchGPU, scalar_t, int>
gatherFtor;
gatherFtor(tv::TorchGPU(), tv::torch2tv<scalar_t>(inputBuffer),
tv::torch2tv<const scalar_t>(features),
tv::torch2tv<const int>(indicePairs).subview(i, inverse),
nHot);
TV_CHECK_CUDA_ERR();
          /* slower than SparseGatherFunctor, possibly due to the int->long conversion
auto indicePairLong = indicePairs[i][inverse].to(torch::kInt64);
auto indicePairBlob =
torch::from_blob(indicePairLong.data_ptr<long>(), {nHot},
indicePairOptions); torch::index_select_out(inputBufferBlob,
features, 0, indicePairBlob);*/
}
torch::mm_out(outputBufferBlob, inputBufferBlob, filters[i]);
if (device == torch::kCPU) {
functor::SparseScatterAddFunctor<tv::CPU, scalar_t, int>
scatterFtor;
scatterFtor(
tv::CPU(), tv::torch2tv<scalar_t>(output),
tv::torch2tv<const scalar_t>(outputBuffer),
tv::torch2tv<const int>(indicePairs).subview(i, !inverse), nHot,
true);
} else {
functor::SparseScatterAddFunctor<tv::TorchGPU, scalar_t, int>
scatterFtor;
scatterFtor(
tv::TorchGPU(), tv::torch2tv<scalar_t>(output),
tv::torch2tv<const scalar_t>(outputBuffer),
tv::torch2tv<const int>(indicePairs).subview(i, !inverse), nHot,
true);
TV_CHECK_CUDA_ERR();
}
});
}
return output;
}
|
5d8145b8f0b8cd1394a20521d17efc718913192e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "headers/slenet_params.h"
#include "headers/load_mnist.h"
#include "headers/Layer.h"
#include "headers/Slenet_fc_test.h"
// Layer declarations
Layer *convNet;
Layer *ss1Net;
Layer *fcNet;
float convWeights[CONV_FTRS][CONV_WSIZE][CONV_WSIZE];
float convBias[CONV_FTRS];
float ssWeights[SS_FTRS][SS_WSIZE][SS_WSIZE];
float ssBias[SS_FTRS];
float fcWeights[FC_FTRS][FC_WSIZE];
float fcBias[FC_FTRS];
int count = 0;
float forward_pass(double data[INSIZE][INSIZE]) {
float *gInput;
float arr[INSIZE][INSIZE];
for (int i=0; i<INSIZE; i++)
for (int j=0; j<INSIZE; j++)
arr[i][j] = data[i][j];
	// Copy the input image into device memory
gpuErrchk(hipMalloc(&gInput, INSIZE*INSIZE*sizeof(float)));
gpuErrchk(hipMemcpy(gInput, arr, INSIZE*INSIZE*sizeof(float), hipMemcpyDefault));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Convolution
hipLaunchKernelGGL(( kernel_conv_filter), dim3(cf_numBlocks), dim3(cf_threadPerBlock), 0, 0,
(float(*)[INSIZE])gInput,
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->pre_output,
(float(*)[CONV_WSIZE][CONV_WSIZE])convNet->weight);
hipLaunchKernelGGL(( kernel_conv_bias), dim3(cb_numBlocks), dim3(cb_threadPerBlock), 0, 0,
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->pre_output,
convNet->bias);
hipLaunchKernelGGL(( kernel_conv_sigmoid), dim3(cs_numBlocks), dim3(cs_threadPerBlock), 0, 0,
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->pre_output,
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->output);
// Subsampling
hipLaunchKernelGGL(( kernel_ss1_filter), dim3(ssf_numBlocks), dim3(ssf_threadPerBlock), 0, 0,
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->output,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->pre_output,
(float(*)[SS_WSIZE][SS_WSIZE])ss1Net->weight);
hipLaunchKernelGGL(( kernel_ss1_bias), dim3(ssb_numBlocks), dim3(ssb_threadPerBlock), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->pre_output,
ss1Net->bias);
hipLaunchKernelGGL(( kernel_ss1_sigmoid), dim3(sss_numBlocks), dim3(sss_threadPerBlock), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->pre_output,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output);
// Fully Connected
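	// Note: the kernel_fc1_filter<R>_<C> launches below appear to tile the
	// 10x216 fully-connected layer (the suffixes presumably encode tile shapes),
	// and each variant is issued both with and without shared memory ("sh").
	// Running every variant back-to-back over the same pre_output looks like
	// kernel-variant benchmarking rather than a minimal inference path.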
hipLaunchKernelGGL(( kernel_fc1_filter7_10), dim3(fcfNumBlocks7_10), dim3(fcfNthreadPerBlock7_10), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter7_5), dim3(fcfNumBlocks7_5), dim3(fcfNthreadPerBlock7_5), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter7_3), dim3(fcfNumBlocks7_3), dim3(fcfNthreadPerBlock7_3), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter7_1), dim3(fcfNumBlocks7_1), dim3(fcfNthreadPerBlock7_1), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter4_10), dim3(fcfNumBlocks4_10), dim3(fcfNthreadPerBlock4_10), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter4_5), dim3(fcfNumBlocks4_5), dim3(fcfNthreadPerBlock4_5), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter4_3), dim3(fcfNumBlocks4_3), dim3(fcfNthreadPerBlock4_3), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter4_1), dim3(fcfNumBlocks4_1), dim3(fcfNthreadPerBlock4_1), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter2_10), dim3(fcfNumBlocks2_10), dim3(fcfNthreadPerBlock2_10), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter2_5), dim3(fcfNumBlocks2_5), dim3(fcfNthreadPerBlock2_5), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter2_3), dim3(fcfNumBlocks2_3), dim3(fcfNthreadPerBlock2_3), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter1_10), dim3(fcfNumBlocks1_10), dim3(fcfNthreadPerBlock1_10), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter1_5), dim3(fcfNumBlocks1_5), dim3(fcfNthreadPerBlock1_5), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter1_3), dim3(fcfNumBlocks1_3), dim3(fcfNthreadPerBlock1_3), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter7_10sh), dim3(fcfNumBlocks7_10), dim3(fcfNthreadPerBlock7_10), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter7_5sh), dim3(fcfNumBlocks7_5), dim3(fcfNthreadPerBlock7_5), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter7_3sh), dim3(fcfNumBlocks7_3), dim3(fcfNthreadPerBlock7_3), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter7_1sh), dim3(fcfNumBlocks7_1), dim3(fcfNthreadPerBlock7_1), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter4_10sh), dim3(fcfNumBlocks4_10), dim3(fcfNthreadPerBlock4_10), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter4_5sh), dim3(fcfNumBlocks4_5), dim3(fcfNthreadPerBlock4_5), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter4_3sh), dim3(fcfNumBlocks4_3), dim3(fcfNthreadPerBlock4_3), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter4_1sh), dim3(fcfNumBlocks4_1), dim3(fcfNthreadPerBlock4_1), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter2_10sh), dim3(fcfNumBlocks2_10), dim3(fcfNthreadPerBlock2_10), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter2_5sh), dim3(fcfNumBlocks2_5), dim3(fcfNthreadPerBlock2_5), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter2_3sh), dim3(fcfNumBlocks2_3), dim3(fcfNthreadPerBlock2_3), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter1_10sh), dim3(fcfNumBlocks1_10), dim3(fcfNthreadPerBlock1_10), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter1_5sh), dim3(fcfNumBlocks1_5), dim3(fcfNthreadPerBlock1_5), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_filter1_3sh), dim3(fcfNumBlocks1_3), dim3(fcfNthreadPerBlock1_3), 0, 0,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
hipLaunchKernelGGL(( kernel_fc1_bias), dim3(fcbsNumBlocks), dim3(fcbsNthreadPerBlock), 0, 0, fcNet->pre_output, fcNet->bias);
hipLaunchKernelGGL(( kernel_fc1_sigmoid), dim3(fcbsNumBlocks), dim3(fcbsNthreadPerBlock), 0, 0, fcNet->pre_output, fcNet->output);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
	float eltime;
	hipEventElapsedTime(&eltime, start, stop);
	// Release per-call resources so repeated forward passes don't leak
	hipEventDestroy(start);
	hipEventDestroy(stop);
	hipFree(gInput);
	return eltime;
}
int main() {
int ret; int i;
mnist_data *dataset = new mnist_data[10000];
static unsigned int test_cnt;
// load data
	if ((ret = mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte", &dataset, &test_cnt)) != 0)
printf("An error occurred: %d \n", ret);
else
printf("test_cnt = %d \n", test_cnt); // test_cnt must have the number of test images (i.e., 10K)
for (int i=0; i < CONV_FTRS; i++)
for (int j = 0; j < CONV_WSIZE; j++)
for (int k = 0; k < CONV_WSIZE; k++)
convWeights[i][j][k] = (i < 6) ? c1_weight[i][j*CONV_WSIZE+k] : 0;
for (int i=0; i < CONV_FTRS; i++)
convBias[i] = (i < 6) ? c1_bias[i] : 0;
for (int i=0; i < SS_FTRS; i++)
for (int j = 0; j < SS_WSIZE; j++)
for (int k = 0; k < SS_WSIZE; k++)
ssWeights[i][j][k] = s2_weight[i][j*SS_WSIZE+k];
for (int i=0; i < SS_FTRS; i++)
ssBias[i] = s2_bias[i];
for (int i=0; i < FC_FTRS; i++)
for (int j=0; j < FC_WSIZE; j++)
if (i < 10 && j < 216)
fcWeights[i][j] = f3_weight[i][j];
else
fcWeights[i][j] = 0;
for (int i=0; i < FC_FTRS; i++)
if (i < 10)
fcBias[i] = f3_bias[i];
else
fcBias[i] = 0;
convNet = new Layer(CONV_WSIZE*CONV_WSIZE, CONV_FTRS, CONV_FTRS*CONV_OUTSIZE*CONV_OUTSIZE);
ss1Net = new Layer(SS_WSIZE*SS_WSIZE, SS_FTRS, CONV_FTRS*SS_OUTSIZE*SS_OUTSIZE);
fcNet = new Layer(FC_WSIZE, FC_FTRS, FC_OUTSIZE);
gpuErrchk(hipMemcpy(convNet->weight,
convWeights,
CONV_WSIZE * CONV_WSIZE * CONV_FTRS * sizeof(float),
hipMemcpyDefault));
gpuErrchk(hipMemcpy(convNet->bias,
convBias,
CONV_FTRS * sizeof(float),
hipMemcpyDefault));
gpuErrchk(hipMemcpy(ss1Net->weight,
ssWeights,
SS_FTRS * SS_WSIZE * SS_WSIZE * sizeof(float),
hipMemcpyDefault));
gpuErrchk(hipMemcpy(ss1Net->bias,
ssBias,
SS_FTRS * sizeof(float),
hipMemcpyDefault));
gpuErrchk(hipMemcpy(fcNet->weight,
fcWeights, FC_FTRS * FC_WSIZE * sizeof(float),
hipMemcpyDefault));
gpuErrchk(hipMemcpy(fcNet->bias,
fcBias, FC_FTRS * sizeof(float),
hipMemcpyDefault));
float time_taken = 0;
unsigned int error = 0;
unsigned int max = 0;
float res[FC_OUTSIZE];
	for (i=0; i<1; i++) { // NOTE: only the first test image is evaluated here
		max = 0; // reset the argmax for each image
		time_taken += forward_pass(dataset[i].data);
hipMemcpy(res, fcNet->output, sizeof(float)*FC_OUTSIZE, hipMemcpyDefault);
for(int j=0; j<10; j++) {
if (res[max] < res[j])
max = j;
}
if (max != dataset[i].label) ++error; // error must have the number of incorrect predictions.
}
// printf("Error Rate = %f%% (%d out of 10,000)\n", double(error)/double(test_cnt)*100.0, error);
// printf("Accuracy = %.3f%% (%d out of 10,000)\n",
// 100.0 - double(error)/double(test_cnt)*100.0, test_cnt - error);
// printf("Ex time = %f (ms) \n", time_taken);
delete[] dataset;
delete convNet;
delete ss1Net;
delete fcNet;
return 0;
}
| 5d8145b8f0b8cd1394a20521d17efc718913192e.cu | #include <stdio.h>
#include <stdlib.h>
#include "headers/slenet_params.h"
#include "headers/load_mnist.h"
#include "headers/Layer.h"
#include "headers/Slenet_fc_test.h"
// Layer declarations
Layer *convNet;
Layer *ss1Net;
Layer *fcNet;
float convWeights[CONV_FTRS][CONV_WSIZE][CONV_WSIZE];
float convBias[CONV_FTRS];
float ssWeights[SS_FTRS][SS_WSIZE][SS_WSIZE];
float ssBias[SS_FTRS];
float fcWeights[FC_FTRS][FC_WSIZE];
float fcBias[FC_FTRS];
int count = 0;
float forward_pass(double data[INSIZE][INSIZE]) {
float *gInput;
float arr[INSIZE][INSIZE];
for (int i=0; i<INSIZE; i++)
for (int j=0; j<INSIZE; j++)
arr[i][j] = data[i][j];
	// Copy the input image into device memory
gpuErrchk(cudaMalloc(&gInput, INSIZE*INSIZE*sizeof(float)));
gpuErrchk(cudaMemcpy(gInput, arr, INSIZE*INSIZE*sizeof(float), cudaMemcpyDefault));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Convolution
kernel_conv_filter<<<cf_numBlocks, cf_threadPerBlock>>>(
(float(*)[INSIZE])gInput,
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->pre_output,
(float(*)[CONV_WSIZE][CONV_WSIZE])convNet->weight);
kernel_conv_bias<<<cb_numBlocks, cb_threadPerBlock>>>(
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->pre_output,
convNet->bias);
kernel_conv_sigmoid<<<cs_numBlocks, cs_threadPerBlock>>>(
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->pre_output,
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->output);
// Subsampling
kernel_ss1_filter<<<ssf_numBlocks, ssf_threadPerBlock>>>(
(float(*)[CONV_OUTSIZE][CONV_OUTSIZE])convNet->output,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->pre_output,
(float(*)[SS_WSIZE][SS_WSIZE])ss1Net->weight);
kernel_ss1_bias<<<ssb_numBlocks, ssb_threadPerBlock>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->pre_output,
ss1Net->bias);
kernel_ss1_sigmoid<<<sss_numBlocks, sss_threadPerBlock>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->pre_output,
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output);
// Fully Connected
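	// Note: the kernel_fc1_filter<R>_<C> launches below appear to tile the
	// 10x216 fully-connected layer (the suffixes presumably encode tile shapes),
	// and each variant is issued both with and without shared memory ("sh").
	// Running every variant back-to-back over the same pre_output looks like
	// kernel-variant benchmarking rather than a minimal inference path.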
kernel_fc1_filter7_10<<<fcfNumBlocks7_10, fcfNthreadPerBlock7_10>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter7_5<<<fcfNumBlocks7_5, fcfNthreadPerBlock7_5>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter7_3<<<fcfNumBlocks7_3, fcfNthreadPerBlock7_3>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter7_1<<<fcfNumBlocks7_1, fcfNthreadPerBlock7_1>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter4_10<<<fcfNumBlocks4_10, fcfNthreadPerBlock4_10>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter4_5<<<fcfNumBlocks4_5, fcfNthreadPerBlock4_5>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter4_3<<<fcfNumBlocks4_3, fcfNthreadPerBlock4_3>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter4_1<<<fcfNumBlocks4_1, fcfNthreadPerBlock4_1>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter2_10<<<fcfNumBlocks2_10, fcfNthreadPerBlock2_10>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter2_5<<<fcfNumBlocks2_5, fcfNthreadPerBlock2_5>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter2_3<<<fcfNumBlocks2_3, fcfNthreadPerBlock2_3>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter1_10<<<fcfNumBlocks1_10, fcfNthreadPerBlock1_10>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter1_5<<<fcfNumBlocks1_5, fcfNthreadPerBlock1_5>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter1_3<<<fcfNumBlocks1_3, fcfNthreadPerBlock1_3>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter7_10sh<<<fcfNumBlocks7_10, fcfNthreadPerBlock7_10>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter7_5sh<<<fcfNumBlocks7_5, fcfNthreadPerBlock7_5>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter7_3sh<<<fcfNumBlocks7_3, fcfNthreadPerBlock7_3>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter7_1sh<<<fcfNumBlocks7_1, fcfNthreadPerBlock7_1>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter4_10sh<<<fcfNumBlocks4_10, fcfNthreadPerBlock4_10>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter4_5sh<<<fcfNumBlocks4_5, fcfNthreadPerBlock4_5>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter4_3sh<<<fcfNumBlocks4_3, fcfNthreadPerBlock4_3>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter4_1sh<<<fcfNumBlocks4_1, fcfNthreadPerBlock4_1>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter2_10sh<<<fcfNumBlocks2_10, fcfNthreadPerBlock2_10>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter2_5sh<<<fcfNumBlocks2_5, fcfNthreadPerBlock2_5>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter2_3sh<<<fcfNumBlocks2_3, fcfNthreadPerBlock2_3>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter1_10sh<<<fcfNumBlocks1_10, fcfNthreadPerBlock1_10>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter1_5sh<<<fcfNumBlocks1_5, fcfNthreadPerBlock1_5>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_filter1_3sh<<<fcfNumBlocks1_3, fcfNthreadPerBlock1_3>>>(
(float(*)[SS_OUTSIZE][SS_OUTSIZE])ss1Net->output,
fcNet->pre_output,
(float(*)[FC_WSIZE])fcNet->weight);
kernel_fc1_bias<<<fcbsNumBlocks, fcbsNthreadPerBlock>>>(fcNet->pre_output, fcNet->bias);
kernel_fc1_sigmoid<<<fcbsNumBlocks, fcbsNthreadPerBlock>>>(fcNet->pre_output, fcNet->output);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float eltime;
cudaEventElapsedTime(&eltime, start, stop);
return eltime;
}
int main() {
int ret; int i;
mnist_data *dataset = new mnist_data[10000];
static unsigned int test_cnt;
// load data
if ((ret = mnist_load("data/t10k-images.idx3-ubyte", "data/t10k-labels.idx1-ubyte", &dataset, &test_cnt)) != 0) // extra parentheses: != binds tighter than =, so ret would otherwise hold 0/1 instead of the error code
printf("An error occurred: %d \n", ret);
else
printf("test_cnt = %d \n", test_cnt); // test_cnt must have the number of test images (i.e., 10K)
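// copy the pretrained parameters into padded host buffers; entries beyond the
// trained sizes (6 conv filters, 10 FC outputs, 216 FC inputs) are zero-filled,
// so the padded parts of the GPU layers contribute nothing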
for (int i=0; i < CONV_FTRS; i++)
for (int j = 0; j < CONV_WSIZE; j++)
for (int k = 0; k < CONV_WSIZE; k++)
convWeights[i][j][k] = (i < 6) ? c1_weight[i][j*CONV_WSIZE+k] : 0;
for (int i=0; i < CONV_FTRS; i++)
convBias[i] = (i < 6) ? c1_bias[i] : 0;
for (int i=0; i < SS_FTRS; i++)
for (int j = 0; j < SS_WSIZE; j++)
for (int k = 0; k < SS_WSIZE; k++)
ssWeights[i][j][k] = s2_weight[i][j*SS_WSIZE+k];
for (int i=0; i < SS_FTRS; i++)
ssBias[i] = s2_bias[i];
for (int i=0; i < FC_FTRS; i++)
for (int j=0; j < FC_WSIZE; j++)
if (i < 10 && j < 216)
fcWeights[i][j] = f3_weight[i][j];
else
fcWeights[i][j] = 0;
for (int i=0; i < FC_FTRS; i++)
if (i < 10)
fcBias[i] = f3_bias[i];
else
fcBias[i] = 0;
convNet = new Layer(CONV_WSIZE*CONV_WSIZE, CONV_FTRS, CONV_FTRS*CONV_OUTSIZE*CONV_OUTSIZE);
ss1Net = new Layer(SS_WSIZE*SS_WSIZE, SS_FTRS, CONV_FTRS*SS_OUTSIZE*SS_OUTSIZE);
fcNet = new Layer(FC_WSIZE, FC_FTRS, FC_OUTSIZE);
gpuErrchk(cudaMemcpy(convNet->weight,
convWeights,
CONV_WSIZE * CONV_WSIZE * CONV_FTRS * sizeof(float),
cudaMemcpyDefault));
gpuErrchk(cudaMemcpy(convNet->bias,
convBias,
CONV_FTRS * sizeof(float),
cudaMemcpyDefault));
gpuErrchk(cudaMemcpy(ss1Net->weight,
ssWeights,
SS_FTRS * SS_WSIZE * SS_WSIZE * sizeof(float),
cudaMemcpyDefault));
gpuErrchk(cudaMemcpy(ss1Net->bias,
ssBias,
SS_FTRS * sizeof(float),
cudaMemcpyDefault));
gpuErrchk(cudaMemcpy(fcNet->weight,
fcWeights, FC_FTRS * FC_WSIZE * sizeof(float),
cudaMemcpyDefault));
gpuErrchk(cudaMemcpy(fcNet->bias,
fcBias, FC_FTRS * sizeof(float),
cudaMemcpyDefault));
float time_taken = 0;
unsigned int error = 0;
unsigned int max = 0;
float res[FC_OUTSIZE];
for (i=0; i<1; i++) {
time_taken += forward_pass(dataset[i].data);
cudaMemcpy(res, fcNet->output, sizeof(float)*FC_OUTSIZE, cudaMemcpyDefault);
max = 0; // reset the argmax for each image so a previous prediction cannot carry over
for(int j=0; j<10; j++) {
if (res[max] < res[j])
max = j;
}
if (max != dataset[i].label) ++error; // error must have the number of incorrect predictions.
}
// printf("Error Rate = %f%% (%d out of 10,000)\n", double(error)/double(test_cnt)*100.0, error);
// printf("Accuracy = %.3f%% (%d out of 10,000)\n",
// 100.0 - double(error)/double(test_cnt)*100.0, test_cnt - error);
// printf("Ex time = %f (ms) \n", time_taken);
delete[] dataset;
delete convNet;
delete ss1Net;
delete fcNet;
return 0;
}
|
96cde4e75603f723ad901ec5543c7f232a3a8203.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
int main(void)
{
int count;
hipGetDeviceCount(&count);
printf("%d devices found supporting CUDA\n", count);
char split[] = "----------------------------------\n";
hipDeviceProp_t p;
for(int d = 0; d < count; d++){
hipGetDeviceProperties(&p, d);
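// units as reported by the runtime: totalGlobalMem, sharedMemPerBlock and totalConstMem are in bytes; clockRate is in kHz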
printf("%s", split);
printf("Device %s\n", p.name);
printf("%s", split);
printf(" Device memory: \t%zu\n", p.totalGlobalMem);
printf(" Memory per-block: \t%lu\n", p.sharedMemPerBlock);
printf(" Register per-block: \t%d\n", p.regsPerBlock);
printf(" Warp size: \t\t%d\n", p.warpSize);
printf(" Memory pitch: \t\t%lu\n", p.memPitch);
printf(" Constant Memory: \t%lu\n", p.totalConstMem);
printf(" Max thread per-block: \t%d\n", p.maxThreadsPerBlock);
printf(" Max thread dim: \t%d / %d / %d\n", p.maxThreadsDim[0], p.maxThreadsDim[1], p.maxThreadsDim[2]);
printf(" Max grid size: \t%d / %d / %d\n", p.maxGridSize[0], p.maxGridSize[1], p.maxGridSize[2]);
printf(" Ver: \t\t\t%d.%d\n", p.major, p.minor);
printf(" Clock: \t\t%d\n", p.clockRate);
printf(" Texture Alignment: \t%lu\n", p.textureAlignment);
}
return 0;
}
| 96cde4e75603f723ad901ec5543c7f232a3a8203.cu | #include <cstdio>
int main(void)
{
int count;
cudaGetDeviceCount(&count);
printf("%d devices found supporting CUDA\n", count);
char split[] = "----------------------------------\n";
cudaDeviceProp p;
for(int d = 0; d < count; d++){
cudaGetDeviceProperties(&p, d);
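// units as reported by the runtime: totalGlobalMem, sharedMemPerBlock and totalConstMem are in bytes; clockRate is in kHz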
printf("%s", split);
printf("Device %s\n", p.name);
printf("%s", split);
printf(" Device memory: \t%zu\n", p.totalGlobalMem);
printf(" Memory per-block: \t%lu\n", p.sharedMemPerBlock);
printf(" Register per-block: \t%d\n", p.regsPerBlock);
printf(" Warp size: \t\t%d\n", p.warpSize);
printf(" Memory pitch: \t\t%lu\n", p.memPitch);
printf(" Constant Memory: \t%lu\n", p.totalConstMem);
printf(" Max thread per-block: \t%d\n", p.maxThreadsPerBlock);
printf(" Max thread dim: \t%d / %d / %d\n", p.maxThreadsDim[0], p.maxThreadsDim[1], p.maxThreadsDim[2]);
printf(" Max grid size: \t%d / %d / %d\n", p.maxGridSize[0], p.maxGridSize[1], p.maxGridSize[2]);
printf(" Ver: \t\t\t%d.%d\n", p.major, p.minor);
printf(" Clock: \t\t%d\n", p.clockRate);
printf(" Texture Alignment: \t%lu\n", p.textureAlignment);
}
return 0;
}
|
b3cd07723478c5a3809ef3f2a3706329292e401c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| b3cd07723478c5a3809ef3f2a3706329292e401c.cu | #include <stdio.h>
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
81c6c7489ee2cbf844c42c4695bf3dc1399cf608.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <torch/extension.h>
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <math.h>
#include <algorithm>
#include <stdlib.h>
#include "cpu/vision.h"
/*rle cuda kernels are cuda version of the corresponding cpu functions here
https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c
these are only a subset of rle kernels.*/
typedef unsigned int uint;
typedef unsigned long siz;
typedef unsigned char byte;
//6144 is based on minimum shared memory size per SM
//across all pytorch-supported GPUs. Need to use blocking
//to avoid this restriction
const int BUFFER_SIZE=6144;
const int CNTS_SIZE=6144;
__global__ void crop_and_scale_cuda_kernel(double *dense_poly_data, int *per_anchor_poly_idx, int *poly_rel_idx,
int poly_count, int anchor_count, float4 *anchor_data, int mask_size){
int tid = threadIdx.x;
int block_jump = blockDim.x;
int poly_id = blockIdx.x;
int anchor_idx;
for (anchor_idx = 0; anchor_idx < anchor_count; anchor_idx++){
if (poly_id < per_anchor_poly_idx[anchor_idx + 1]) break;
}
float w = anchor_data[anchor_idx].z - anchor_data[anchor_idx].x;
float h = anchor_data[anchor_idx].w - anchor_data[anchor_idx].y;
w = fmaxf(w, 1.0f);
h = fmaxf(h, 1.0f);
float ratio_h = ((float) mask_size) / h;
float ratio_w = ((float) mask_size) / w;
int poly_ptr_idx_start = poly_rel_idx[poly_id];
int poly_ptr_idx_end = poly_rel_idx[poly_id + 1];
double *poly_data_buf = dense_poly_data + poly_ptr_idx_start;
int len = poly_ptr_idx_end - poly_ptr_idx_start;
for (int j = tid; j < len; j += block_jump){
if (j % 2 == 0) poly_data_buf[j] = ratio_w*((float) poly_data_buf[j]- anchor_data[anchor_idx].x);
if (j % 2 == 1) poly_data_buf[j] = ratio_h*((float) poly_data_buf[j]- anchor_data[anchor_idx].y);
}
}
//merging masks happens on mask format, not RLE format.
__global__ void merge_masks_cuda_kernel(byte *masks_in, float *masks_out, const int mask_size,
int *per_anchor_poly_idx, int anchor_count){
int anchor_idx = blockIdx.x;
int tid = threadIdx.x;
int jump_block = blockDim.x;
int mask_start_idx = per_anchor_poly_idx[anchor_idx];
int num_of_masks_to_merge = per_anchor_poly_idx[anchor_idx + 1]-per_anchor_poly_idx[anchor_idx];
for(int j = tid; j < mask_size * mask_size; j += jump_block){
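//the RLE representation indexes pixels column-major (x * h + y in rle_fr_poly_cuda_kernel),
//so the decoded masks are transposed back to row-major here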
int transposed_pixel = (j % mask_size) * mask_size + j / mask_size;
byte pixel = 0;
for(int k = 0; k < num_of_masks_to_merge; k++){
if (masks_in[(mask_start_idx + k) * mask_size * mask_size + j] == 1) pixel = 1;
if (pixel == 1) break;
}
masks_out[anchor_idx * mask_size * mask_size + transposed_pixel] = (float) pixel;
}
}
/*cuda version of rleDecode function in this API:
https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/
__global__ void decode_rle_cuda_kernel(const int *num_of_cnts, uint *cnts, long h, long w, byte *mask)
{
int poly_id = blockIdx.x;
int tid = threadIdx.x;
int block_jump = blockDim.x;
int m = num_of_cnts[poly_id];
uint *cnts_buf = cnts + CNTS_SIZE * poly_id;
byte *mask_ptr = mask + poly_id * h * w;
__shared__ uint shbuf1[CNTS_SIZE];
__shared__ uint shbuf2[CNTS_SIZE];
//initialize shbuf for scan. first element is 0 (exclusive scan)
for (long i = tid; i < CNTS_SIZE; i += block_jump){
shbuf1[i] = (i <= m && i > 0) ? cnts_buf[i - 1] : 0;
shbuf2[i] = (i <= m && i > 0) ? cnts_buf[i - 1] : 0;
}
__syncthreads();
//double buffering for scan
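//this double-buffered loop is a Hillis-Steele style scan: afterwards scanned_buf[i]
//holds the prefix sum of the first i run lengths, i.e. the pixel index where run i
//starts, so each pixel can locate its run with the binary search below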
int switch_buf = 0;
for (int offset = 1; offset <= m; offset *= 2){
switch_buf = 1 - switch_buf;
if(switch_buf == 0){
for(int j = tid;j <= m;j += block_jump){
if(j >= offset) shbuf2[j] = shbuf1[j]+shbuf1[j - offset];
else shbuf2[j] = shbuf1[j];
}
}else if (switch_buf == 1){
for(int j = tid;j <= m;j += block_jump){
if(j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset];
else shbuf1[j] = shbuf2[j];
}
}
__syncthreads();
}
uint *scanned_buf = switch_buf == 0 ? shbuf2 : shbuf1;
//find which run (bin) pixel j falls into, which determines the pixel value
//use binary search over the scanned run lengths
for(int j = tid; j < h * w; j += block_jump){
int min_idx = 0;
int max_idx = m;
int mid_idx = m / 2;
while(max_idx > min_idx){
if(j > scanned_buf[mid_idx]) {
min_idx = mid_idx+1;
mid_idx = (min_idx + max_idx) / 2;
}
else if (j < scanned_buf[mid_idx]) {
max_idx = mid_idx;
mid_idx = (min_idx + max_idx) / 2;
}
else {
mid_idx++;
break;
}
}
int k = mid_idx;
byte pixel = k % 2 == 0 ? 1 : 0;
mask_ptr[j] = pixel;
}
}
/*cuda version of rleFrPoly function in this API:
https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/
__global__ void rle_fr_poly_cuda_kernel(const double *dense_coordinates, int *poly_rel_idx, long h, long w,
uint *cnts, int *x_in, int *y_in, int *u_in, int *v_in, uint *a_in,
uint *b_in, int *num_of_cnts) {
int poly_id = blockIdx.x;
int tid = threadIdx.x;
int block_jump = blockDim.x;
long cnts_offset = poly_id * CNTS_SIZE;
long k = (poly_rel_idx[poly_id + 1] - poly_rel_idx[poly_id]) / 2;
const double *xy = dense_coordinates + poly_rel_idx[poly_id];
int *x = x_in + poly_id * BUFFER_SIZE;
int *y = y_in + poly_id * BUFFER_SIZE;
int *u = u_in + poly_id * BUFFER_SIZE;
int *v = v_in + poly_id * BUFFER_SIZE;
uint *a = a_in + poly_id * BUFFER_SIZE;
uint *b = b_in + poly_id * BUFFER_SIZE;
/* upsample and get discrete points densely along entire boundary */
long j, m = 0;
double scale = 5;
__shared__ int shbuf1[BUFFER_SIZE];
__shared__ int shbuf2[BUFFER_SIZE];
for(long j = tid; j < BUFFER_SIZE; j += block_jump) {
shbuf1[j] = 0;
shbuf2[j] = 0;
}
for(long j = tid; j <= k; j += block_jump)
x[j] = j < k ? ((int) (scale * xy[2 * j + 0] + 0.5)) : ((int) (scale * xy[0] + 0.5));
for(long j = tid; j <= k; j += block_jump)
y[j] = j < k ? ((int) (scale * xy[2 * j + 1] + 0.5)) : ((int) (scale * xy[1] + 0.5));
__syncthreads();
for(int j = tid; j < k; j += block_jump){
int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist;
int flip;
double s;
dx = abs(xe - xs);
dy = abs(ys - ye);
flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye);
if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;}
s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy;
dist = dx >= dy ? dx + 1 : dy + 1;
shbuf1[j + 1] = dist;
shbuf2[j + 1] = dist;
}
__syncthreads();
//block-wide exclusive prefix scan
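//scanning the per-segment point counts gives every polygon segment its write
//offset into u/v, so the rasterized boundary points land contiguously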
int switch_buf = 0;
for (int offset = 1; offset <= k; offset *= 2){
switch_buf = 1 - switch_buf;
if (switch_buf == 0){
for(int j = tid; j <= k; j += block_jump){
if (j >= offset) shbuf2[j] = shbuf1[j] + shbuf1[j - offset];
else shbuf2[j] = shbuf1[j];
}
}
else if (switch_buf == 1){
for(int j = tid; j <= k; j += block_jump){
if (j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset];
else shbuf1[j] = shbuf2[j];
}
}
__syncthreads();
}
for (int j = tid; j < k; j += block_jump){
int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist;
int flip;
double s;
dx = __sad(xe, xs, 0);
dy = __sad(ys, ye, 0);
flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye);
if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;}
s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy;
m = switch_buf == 0 ? shbuf2[j] : shbuf1[j];
if (dx >= dy) for (d = 0; d <= dx; d++) {
/*the multiplication statement 's*t' causes nvcc to optimize with flush-to-zero=True for
double precision multiply, which we observe produces different results than CPU occasionally.
To force flush-to-zero=False, we use __dmul_rn intrinsics function */
t = flip ? dx - d : d;
u[m] = t + xs;
v[m] = (int) (ys + __dmul_rn(s, t) + .5);
m++;
}
else for (d = 0; d <= dy; d++) {
t = flip ? dy - d : d;
v[m] = t + ys;
u[m] = (int) (xs + __dmul_rn(s, t) + .5);
m++;
}
}
__syncthreads();
m = switch_buf == 0 ? shbuf2[k] : shbuf1[k];
int k2 = m;
__syncthreads();
double xd, yd;
if (tid == 0) {
shbuf1[tid] = 0;
shbuf2[tid] = 0;
}
/* get points along y-boundary and downsample */
for (int j = tid; j < k2; j += block_jump){
if (j > 0){
if (u[j] != u[j - 1]){
xd = (double) (u[j] < u[j-1] ? u[j] : u[j] - 1);
xd = (xd + .5) / scale - .5;
if (floor(xd) != xd || xd < 0 || xd > w - 1 ) {
shbuf1[j] = 0;
shbuf2[j] = 0;
continue;
}
yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5;
if (yd < 0) yd = 0;
else if (yd > h) yd = h; yd = ceil(yd);
shbuf1[j] = 1;
shbuf2[j] = 1;
} else {
shbuf1[j] = 0;
shbuf2[j] = 0;
}
}
}
__syncthreads();
//exclusive prefix scan
switch_buf = 0;
for (int offset = 1; offset < k2; offset *= 2){
switch_buf = 1 - switch_buf;
if (switch_buf == 0){
for (int j = tid; j < k2; j += block_jump){
if (j >= offset) shbuf2[j] = shbuf1[j - offset] + shbuf1[j];
else shbuf2[j] = shbuf1[j];
}
}
else if (switch_buf == 1){
for (int j = tid; j < k2; j += block_jump){
if (j >= offset) shbuf1[j] = shbuf2[j - offset] + shbuf2[j];
else shbuf1[j] = shbuf2[j];
}
}
__syncthreads();
}
for (int j = tid; j < k2; j += block_jump){
if (j > 0){
if(u[j] != u[j - 1]){
xd = (double) (u[j] < u[j - 1] ? u[j] : u[j] - 1);
xd = (xd + .5) / scale - .5;
if (floor(xd) != xd || xd < 0 || xd > w - 1) {continue;}
yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]);
yd = (yd + .5) / scale - .5;
if (yd < 0) yd = 0;
else if (yd > h) yd = h; yd = ceil(yd);
m = switch_buf == 0 ? shbuf2[j - 1]:shbuf1[j - 1];
x[m] = (int) xd;
y[m] = (int) yd;
m++;
}
}
}
__syncthreads();
/* compute rle encoding given y-boundary points */
m = switch_buf == 0 ? shbuf2[k2 - 1] : shbuf1[k2 - 1];
int k3 = m;
for (int j = tid; j <= k3; j += block_jump){
if (j < k3) a[j] = (uint) (x[j] * (int) (h) + y[j]);
else a[j] = (uint)(h * w);
}
k3++;
__syncthreads();
//run brick sort on a for k3+1 elements
//load k3+1 elements of a into shared memory
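//brick sort = odd-even transposition sort: alternating even/odd compare-and-swap
//phases; the k3/2+1 rounds below perform enough phases to fully order the buffer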
for(long j = tid; j < k3; j += block_jump) shbuf1[j]=a[j];
__syncthreads();
uint a_temp;
for (int r = 0; r <= k3 / 2; r++){
int evenCas = k3 / 2;
int oddCas = (k3 - 1) / 2;
//start with 0, need (k3+1)/2 CAS
for (int j = tid; j < evenCas; j += block_jump){
if (shbuf1[2 * j] > shbuf1[2 * j + 1]){
a_temp = shbuf1[2 * j];
shbuf1[2 * j]=shbuf1[2 * j + 1];
shbuf1[2 * j + 1] = a_temp;
}
}
__syncthreads();
//start with 1
for (int j = tid; j < oddCas; j += block_jump){
if (shbuf1[2 * j + 1] > shbuf1[2 * j + 2]){
a_temp=shbuf1[2 * j + 1];
shbuf1[2 * j + 1] = shbuf1[2 * j + 2];
shbuf1[2 * j + 2]=a_temp;
}
}
__syncthreads();
}
for(long j = tid; j < k3; j += block_jump) {
if(j>0) shbuf2[j] = shbuf1[j - 1];
else shbuf2[j] = 0;
}
__syncthreads();
for(int j = tid; j < k3; j += block_jump){
shbuf1[j] -= shbuf2[j];
}
__syncthreads();
uint *cnts_buf = cnts + cnts_offset;
if (tid == 0){
j = m = 0;
cnts_buf[m++] = shbuf1[j++];
while (j < k3) if (shbuf1[j] > 0) cnts_buf[m++] = shbuf1[j++]; else {
j++; if (j < k3) cnts_buf[m - 1] += shbuf1[j++]; }
num_of_cnts[poly_id] = m;
}
__syncthreads();
}
at::Tensor generate_mask_targets_cuda(at::Tensor dense_vector, const std::vector<std::vector<at::Tensor>> polygons,
const at::Tensor anchors, const int mask_size){
const int M = mask_size;
assert (M < 32);
//if M >=32, shared memory buffer size may not be
//sufficient. Need to fix this by blocking
float *d_anchor_data = anchors.data_ptr<float>();
int num_of_anchors = anchors.size(0);
auto options = torch::dtype(torch::kInt).device(torch::kCPU).pinned_memory(true);
auto per_anchor_poly_idx = at::empty({num_of_anchors + 1}, options);
int num_of_poly = 0;
for (int i = 0; i < num_of_anchors; i++){
*(per_anchor_poly_idx.data_ptr<int>() + i) = num_of_poly;
num_of_poly += polygons[i].size();
}
*(per_anchor_poly_idx.data_ptr<int>() + num_of_anchors) = num_of_poly;
auto poly_rel_idx = at::empty({num_of_poly + 1}, options);
double *dense_poly_data = dense_vector.data_ptr<double>();
int start_idx = 0;
int poly_count = 0;
for(int i = 0; i < polygons.size(); i++){
for(int j=0; j < polygons[i].size(); j++) {
*(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx;
start_idx += polygons[i][j].size(0);
poly_count++;
}
}
*(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx;
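// per_anchor_poly_idx and poly_rel_idx are CSR-style offset arrays:
// per_anchor_poly_idx[a] .. per_anchor_poly_idx[a+1] spans anchor a's polygons, and
// poly_rel_idx[p] is polygon p's start offset into the dense coordinate vector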
at::Tensor d_x_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_y_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_u_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_v_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_a_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));//used with uint* pointer
at::Tensor d_b_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); //used with uint* pointer
at::Tensor d_mask_t = torch::empty({M * M * num_of_poly}, torch::CUDA(at::kByte));
auto result = torch::empty({num_of_anchors, M, M}, torch::CUDA(at::kFloat));
at::Tensor d_num_of_counts_t = torch::empty({num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_cnts_t = torch::empty({CNTS_SIZE * num_of_poly}, torch::CUDA(at::kInt));
auto d_dense_vector = dense_vector.cuda();
auto d_per_anchor_poly_idx = per_anchor_poly_idx.to(torch::kCUDA, true);
auto d_poly_rel_idx = poly_rel_idx.to(torch::kCUDA, true);
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( crop_and_scale_cuda_kernel), dim3(num_of_poly), dim3(256), 0, stream.stream(), d_dense_vector.data_ptr<double>(),
d_per_anchor_poly_idx.data_ptr<int>(),
d_poly_rel_idx.data_ptr<int>(),
poly_count,
num_of_anchors,
(float4*) d_anchor_data,
M);
//TODO: larger threads-per-block might be better here, because each CTA uses 32 KB of shmem,
//and occupancy is likely shmem capacity bound
hipLaunchKernelGGL(( rle_fr_poly_cuda_kernel), dim3(num_of_poly), dim3(1024), 0, stream.stream(), d_dense_vector.data_ptr<double>(),
d_poly_rel_idx.data_ptr<int>(),
M, M,
(uint*) d_cnts_t.data_ptr<int>(),
d_x_t.data_ptr<int>(),
d_y_t.data_ptr<int>(),
d_u_t.data_ptr<int>(),
d_v_t.data_ptr<int>(),
(uint*) d_a_t.data_ptr<int>(),
(uint*) d_b_t.data_ptr<int>(),
d_num_of_counts_t.data_ptr<int>());
hipLaunchKernelGGL(( decode_rle_cuda_kernel), dim3(num_of_poly), dim3(256), 0, stream.stream(), d_num_of_counts_t.data_ptr<int>(),
(uint*) d_cnts_t.data_ptr<int>(),
M, M,
d_mask_t.data_ptr<byte>());
hipLaunchKernelGGL(( merge_masks_cuda_kernel), dim3(num_of_anchors), dim3(256), 0, stream.stream(), d_mask_t.data<byte>(), result.data_ptr<float>(),
M, d_per_anchor_poly_idx.data_ptr<int>(),
num_of_anchors);
return result;
}
| 81c6c7489ee2cbf844c42c4695bf3dc1399cf608.cu | /**
* Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <torch/extension.h>
#include <iostream>
#include <vector>
#include <cuda.h>
#include <cuda_runtime.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <math.h>
#include <algorithm>
#include <stdlib.h>
#include "cpu/vision.h"
/*rle cuda kernels are cuda version of the corresponding cpu functions here
https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c
these are only a subset of rle kernels.*/
typedef unsigned int uint;
typedef unsigned long siz;
typedef unsigned char byte;
//6144 is based on minimum shared memory size per SM
//across all pytorch-supported GPUs. Need to use blocking
//to avoid this restriction
const int BUFFER_SIZE=6144;
const int CNTS_SIZE=6144;
__global__ void crop_and_scale_cuda_kernel(double *dense_poly_data, int *per_anchor_poly_idx, int *poly_rel_idx,
int poly_count, int anchor_count, float4 *anchor_data, int mask_size){
int tid = threadIdx.x;
int block_jump = blockDim.x;
int poly_id = blockIdx.x;
int anchor_idx;
for (anchor_idx = 0; anchor_idx < anchor_count; anchor_idx++){
if (poly_id < per_anchor_poly_idx[anchor_idx + 1]) break;
}
float w = anchor_data[anchor_idx].z - anchor_data[anchor_idx].x;
float h = anchor_data[anchor_idx].w - anchor_data[anchor_idx].y;
w = fmaxf(w, 1.0f);
h = fmaxf(h, 1.0f);
float ratio_h = ((float) mask_size) / h;
float ratio_w = ((float) mask_size) / w;
int poly_ptr_idx_start = poly_rel_idx[poly_id];
int poly_ptr_idx_end = poly_rel_idx[poly_id + 1];
double *poly_data_buf = dense_poly_data + poly_ptr_idx_start;
int len = poly_ptr_idx_end - poly_ptr_idx_start;
for (int j = tid; j < len; j += block_jump){
if (j % 2 == 0) poly_data_buf[j] = ratio_w*((float) poly_data_buf[j]- anchor_data[anchor_idx].x);
if (j % 2 == 1) poly_data_buf[j] = ratio_h*((float) poly_data_buf[j]- anchor_data[anchor_idx].y);
}
}
//merging masks happens on mask format, not RLE format.
__global__ void merge_masks_cuda_kernel(byte *masks_in, float *masks_out, const int mask_size,
int *per_anchor_poly_idx, int anchor_count){
int anchor_idx = blockIdx.x;
int tid = threadIdx.x;
int jump_block = blockDim.x;
int mask_start_idx = per_anchor_poly_idx[anchor_idx];
int num_of_masks_to_merge = per_anchor_poly_idx[anchor_idx + 1]-per_anchor_poly_idx[anchor_idx];
for(int j = tid; j < mask_size * mask_size; j += jump_block){
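//the RLE representation indexes pixels column-major (x * h + y in rle_fr_poly_cuda_kernel),
//so the decoded masks are transposed back to row-major here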
int transposed_pixel = (j % mask_size) * mask_size + j / mask_size;
byte pixel = 0;
for(int k = 0; k < num_of_masks_to_merge; k++){
if (masks_in[(mask_start_idx + k) * mask_size * mask_size + j] == 1) pixel = 1;
if (pixel == 1) break;
}
masks_out[anchor_idx * mask_size * mask_size + transposed_pixel] = (float) pixel;
}
}
/*cuda version of rleDecode function in this API:
https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/
__global__ void decode_rle_cuda_kernel(const int *num_of_cnts, uint *cnts, long h, long w, byte *mask)
{
int poly_id = blockIdx.x;
int tid = threadIdx.x;
int block_jump = blockDim.x;
int m = num_of_cnts[poly_id];
uint *cnts_buf = cnts + CNTS_SIZE * poly_id;
byte *mask_ptr = mask + poly_id * h * w;
__shared__ uint shbuf1[CNTS_SIZE];
__shared__ uint shbuf2[CNTS_SIZE];
//initialize shbuf for scan. first element is 0 (exclusive scan)
for (long i = tid; i < CNTS_SIZE; i += block_jump){
shbuf1[i] = (i <= m && i > 0) ? cnts_buf[i - 1] : 0;
shbuf2[i] = (i <= m && i > 0) ? cnts_buf[i - 1] : 0;
}
__syncthreads();
//double buffering for scan
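//this double-buffered loop is a Hillis-Steele style scan: afterwards scanned_buf[i]
//holds the prefix sum of the first i run lengths, i.e. the pixel index where run i
//starts, so each pixel can locate its run with the binary search below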
int switch_buf = 0;
for (int offset = 1; offset <= m; offset *= 2){
switch_buf = 1 - switch_buf;
if(switch_buf == 0){
for(int j = tid;j <= m;j += block_jump){
if(j >= offset) shbuf2[j] = shbuf1[j]+shbuf1[j - offset];
else shbuf2[j] = shbuf1[j];
}
}else if (switch_buf == 1){
for(int j = tid;j <= m;j += block_jump){
if(j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset];
else shbuf1[j] = shbuf2[j];
}
}
__syncthreads();
}
uint *scanned_buf = switch_buf == 0 ? shbuf2 : shbuf1;
//find which run (bin) pixel j falls into, which determines the pixel value
//use binary search over the scanned run lengths
for(int j = tid; j < h * w; j += block_jump){
int min_idx = 0;
int max_idx = m;
int mid_idx = m / 2;
while(max_idx > min_idx){
if(j > scanned_buf[mid_idx]) {
min_idx = mid_idx+1;
mid_idx = (min_idx + max_idx) / 2;
}
else if (j < scanned_buf[mid_idx]) {
max_idx = mid_idx;
mid_idx = (min_idx + max_idx) / 2;
}
else {
mid_idx++;
break;
}
}
int k = mid_idx;
byte pixel = k % 2 == 0 ? 1 : 0;
mask_ptr[j] = pixel;
}
}
/*cuda version of rleFrPoly function in this API:
https://github.com/cocodataset/cocoapi/blob/master/common/maskApi.c*/
__global__ void rle_fr_poly_cuda_kernel(const double *dense_coordinates, int *poly_rel_idx, long h, long w,
uint *cnts, int *x_in, int *y_in, int *u_in, int *v_in, uint *a_in,
uint *b_in, int *num_of_cnts) {
int poly_id = blockIdx.x;
int tid = threadIdx.x;
int block_jump = blockDim.x;
long cnts_offset = poly_id * CNTS_SIZE;
long k = (poly_rel_idx[poly_id + 1] - poly_rel_idx[poly_id]) / 2;
const double *xy = dense_coordinates + poly_rel_idx[poly_id];
int *x = x_in + poly_id * BUFFER_SIZE;
int *y = y_in + poly_id * BUFFER_SIZE;
int *u = u_in + poly_id * BUFFER_SIZE;
int *v = v_in + poly_id * BUFFER_SIZE;
uint *a = a_in + poly_id * BUFFER_SIZE;
uint *b = b_in + poly_id * BUFFER_SIZE;
/* upsample and get discrete points densely along entire boundary */
long j, m = 0;
double scale = 5;
__shared__ int shbuf1[BUFFER_SIZE];
__shared__ int shbuf2[BUFFER_SIZE];
for(long j = tid; j < BUFFER_SIZE; j += block_jump) {
shbuf1[j] = 0;
shbuf2[j] = 0;
}
for(long j = tid; j <= k; j += block_jump)
x[j] = j < k ? ((int) (scale * xy[2 * j + 0] + 0.5)) : ((int) (scale * xy[0] + 0.5));
for(long j = tid; j <= k; j += block_jump)
y[j] = j < k ? ((int) (scale * xy[2 * j + 1] + 0.5)) : ((int) (scale * xy[1] + 0.5));
__syncthreads();
for(int j = tid; j < k; j += block_jump){
int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist;
int flip;
double s;
dx = abs(xe - xs);
dy = abs(ys - ye);
flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye);
if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;}
s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy;
dist = dx >= dy ? dx + 1 : dy + 1;
shbuf1[j + 1] = dist;
shbuf2[j + 1] = dist;
}
__syncthreads();
//block-wide exclusive prefix scan
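//scanning the per-segment point counts gives every polygon segment its write
//offset into u/v, so the rasterized boundary points land contiguously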
int switch_buf = 0;
for (int offset = 1; offset <= k; offset *= 2){
switch_buf = 1 - switch_buf;
if (switch_buf == 0){
for(int j = tid; j <= k; j += block_jump){
if (j >= offset) shbuf2[j] = shbuf1[j] + shbuf1[j - offset];
else shbuf2[j] = shbuf1[j];
}
}
else if (switch_buf == 1){
for(int j = tid; j <= k; j += block_jump){
if (j >= offset) shbuf1[j] = shbuf2[j] + shbuf2[j - offset];
else shbuf1[j] = shbuf2[j];
}
}
__syncthreads();
}
for (int j = tid; j < k; j += block_jump){
int xs = x[j], xe = x[j + 1], ys = y[j], ye = y[j + 1], dx, dy, t, d, dist;
int flip;
double s;
dx = __sad(xe, xs, 0);
dy = __sad(ys, ye, 0);
flip = (dx >= dy && xs > xe) || (dx < dy && ys > ye);
if (flip) {t = xs; xs = xe; xe = t; t = ys; ys = ye; ye = t;}
s = dx >= dy ? (double) (ye - ys) / dx : (double) (xe - xs) / dy;
m = switch_buf == 0 ? shbuf2[j] : shbuf1[j];
if (dx >= dy) for (d = 0; d <= dx; d++) {
/*the multiplication statement 's*t' causes nvcc to optimize with flush-to-zero=True for
double precision multiply, which we observe produces different results than CPU occasionally.
To force flush-to-zero=False, we use __dmul_rn intrinsics function */
t = flip ? dx - d : d;
u[m] = t + xs;
v[m] = (int) (ys + __dmul_rn(s, t) + .5);
m++;
}
else for (d = 0; d <= dy; d++) {
t = flip ? dy - d : d;
v[m] = t + ys;
u[m] = (int) (xs + __dmul_rn(s, t) + .5);
m++;
}
}
__syncthreads();
m = switch_buf == 0 ? shbuf2[k] : shbuf1[k];
int k2 = m;
__syncthreads();
double xd, yd;
if (tid == 0) {
shbuf1[tid] = 0;
shbuf2[tid] = 0;
}
/* get points along y-boundary and downsample */
for (int j = tid; j < k2; j += block_jump){
if (j > 0){
if (u[j] != u[j - 1]){
xd = (double) (u[j] < u[j-1] ? u[j] : u[j] - 1);
xd = (xd + .5) / scale - .5;
if (floor(xd) != xd || xd < 0 || xd > w - 1 ) {
shbuf1[j] = 0;
shbuf2[j] = 0;
continue;
}
yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]); yd = (yd + .5) / scale - .5;
if (yd < 0) yd = 0;
else if (yd > h) yd = h; yd = ceil(yd);
shbuf1[j] = 1;
shbuf2[j] = 1;
} else {
shbuf1[j] = 0;
shbuf2[j] = 0;
}
}
}
__syncthreads();
//exclusive prefix scan
switch_buf = 0;
for (int offset = 1; offset < k2; offset *= 2){
switch_buf = 1 - switch_buf;
if (switch_buf == 0){
for (int j = tid; j < k2; j += block_jump){
if (j >= offset) shbuf2[j] = shbuf1[j - offset] + shbuf1[j];
else shbuf2[j] = shbuf1[j];
}
}
else if (switch_buf == 1){
for (int j = tid; j < k2; j += block_jump){
if (j >= offset) shbuf1[j] = shbuf2[j - offset] + shbuf2[j];
else shbuf1[j] = shbuf2[j];
}
}
__syncthreads();
}
for (int j = tid; j < k2; j += block_jump){
if (j > 0){
if(u[j] != u[j - 1]){
xd = (double) (u[j] < u[j - 1] ? u[j] : u[j] - 1);
xd = (xd + .5) / scale - .5;
if (floor(xd) != xd || xd < 0 || xd > w - 1) {continue;}
yd = (double) (v[j] < v[j - 1] ? v[j] : v[j - 1]);
yd = (yd + .5) / scale - .5;
if (yd < 0) yd = 0;
else if (yd > h) yd = h; yd = ceil(yd);
m = switch_buf == 0 ? shbuf2[j - 1]:shbuf1[j - 1];
x[m] = (int) xd;
y[m] = (int) yd;
m++;
}
}
}
__syncthreads();
/* compute rle encoding given y-boundary points */
m = switch_buf == 0 ? shbuf2[k2 - 1] : shbuf1[k2 - 1];
int k3 = m;
for (int j = tid; j <= k3; j += block_jump){
if (j < k3) a[j] = (uint) (x[j] * (int) (h) + y[j]);
else a[j] = (uint)(h * w);
}
k3++;
__syncthreads();
//run brick sort on a for k3+1 elements
//load k3+1 elements of a into shared memory
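//brick sort = odd-even transposition sort: alternating even/odd compare-and-swap
//phases; the k3/2+1 rounds below perform enough phases to fully order the buffer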
for(long j = tid; j < k3; j += block_jump) shbuf1[j]=a[j];
__syncthreads();
uint a_temp;
for (int r = 0; r <= k3 / 2; r++){
int evenCas = k3 / 2;
int oddCas = (k3 - 1) / 2;
//start with 0, need (k3+1)/2 CAS
for (int j = tid; j < evenCas; j += block_jump){
if (shbuf1[2 * j] > shbuf1[2 * j + 1]){
a_temp = shbuf1[2 * j];
shbuf1[2 * j]=shbuf1[2 * j + 1];
shbuf1[2 * j + 1] = a_temp;
}
}
__syncthreads();
//start with 1
for (int j = tid; j < oddCas; j += block_jump){
if (shbuf1[2 * j + 1] > shbuf1[2 * j + 2]){
a_temp=shbuf1[2 * j + 1];
shbuf1[2 * j + 1] = shbuf1[2 * j + 2];
shbuf1[2 * j + 2]=a_temp;
}
}
__syncthreads();
}
for(long j = tid; j < k3; j += block_jump) {
if(j>0) shbuf2[j] = shbuf1[j - 1];
else shbuf2[j] = 0;
}
__syncthreads();
for(int j = tid; j < k3; j += block_jump){
shbuf1[j] -= shbuf2[j];
}
__syncthreads();
uint *cnts_buf = cnts + cnts_offset;
if (tid == 0){
j = m = 0;
cnts_buf[m++] = shbuf1[j++];
while (j < k3) if (shbuf1[j] > 0) cnts_buf[m++] = shbuf1[j++]; else {
j++; if (j < k3) cnts_buf[m - 1] += shbuf1[j++]; }
num_of_cnts[poly_id] = m;
}
__syncthreads();
}
at::Tensor generate_mask_targets_cuda(at::Tensor dense_vector, const std::vector<std::vector<at::Tensor>> polygons,
const at::Tensor anchors, const int mask_size){
const int M = mask_size;
assert (M < 32);
//if M >=32, shared memory buffer size may not be
//sufficient. Need to fix this by blocking
float *d_anchor_data = anchors.data_ptr<float>();
int num_of_anchors = anchors.size(0);
auto options = torch::dtype(torch::kInt).device(torch::kCPU).pinned_memory(true);
auto per_anchor_poly_idx = at::empty({num_of_anchors + 1}, options);
int num_of_poly = 0;
for (int i = 0; i < num_of_anchors; i++){
*(per_anchor_poly_idx.data_ptr<int>() + i) = num_of_poly;
num_of_poly += polygons[i].size();
}
*(per_anchor_poly_idx.data_ptr<int>() + num_of_anchors) = num_of_poly;
auto poly_rel_idx = at::empty({num_of_poly + 1}, options);
double *dense_poly_data = dense_vector.data_ptr<double>();
int start_idx = 0;
int poly_count = 0;
for(int i = 0; i < polygons.size(); i++){
for(int j=0; j < polygons[i].size(); j++) {
*(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx;
start_idx += polygons[i][j].size(0);
poly_count++;
}
}
*(poly_rel_idx.data_ptr<int>() + poly_count) = start_idx;
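// per_anchor_poly_idx and poly_rel_idx are CSR-style offset arrays:
// per_anchor_poly_idx[a] .. per_anchor_poly_idx[a+1] spans anchor a's polygons, and
// poly_rel_idx[p] is polygon p's start offset into the dense coordinate vector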
at::Tensor d_x_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_y_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_u_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_v_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_a_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt));//used with uint* pointer
at::Tensor d_b_t = torch::empty({BUFFER_SIZE * num_of_poly}, torch::CUDA(at::kInt)); //used with uint* pointer
at::Tensor d_mask_t = torch::empty({M * M * num_of_poly}, torch::CUDA(at::kByte));
auto result = torch::empty({num_of_anchors, M, M}, torch::CUDA(at::kFloat));
at::Tensor d_num_of_counts_t = torch::empty({num_of_poly}, torch::CUDA(at::kInt));
at::Tensor d_cnts_t = torch::empty({CNTS_SIZE * num_of_poly}, torch::CUDA(at::kInt));
auto d_dense_vector = dense_vector.cuda();
auto d_per_anchor_poly_idx = per_anchor_poly_idx.to(torch::kCUDA, true);
auto d_poly_rel_idx = poly_rel_idx.to(torch::kCUDA, true);
auto stream = at::cuda::getCurrentCUDAStream();
crop_and_scale_cuda_kernel<<<num_of_poly, 256, 0, stream.stream()>>>(d_dense_vector.data_ptr<double>(),
d_per_anchor_poly_idx.data_ptr<int>(),
d_poly_rel_idx.data_ptr<int>(),
poly_count,
num_of_anchors,
(float4*) d_anchor_data,
M);
//TODO: larger threads-per-block might be better here, because each CTA uses 32 KB of shmem,
//and occupancy is likely shmem capacity bound
rle_fr_poly_cuda_kernel<<<num_of_poly, 1024, 0, stream.stream()>>>(d_dense_vector.data_ptr<double>(),
d_poly_rel_idx.data_ptr<int>(),
M, M,
(uint*) d_cnts_t.data_ptr<int>(),
d_x_t.data_ptr<int>(),
d_y_t.data_ptr<int>(),
d_u_t.data_ptr<int>(),
d_v_t.data_ptr<int>(),
(uint*) d_a_t.data_ptr<int>(),
(uint*) d_b_t.data_ptr<int>(),
d_num_of_counts_t.data_ptr<int>());
decode_rle_cuda_kernel<<<num_of_poly, 256, 0, stream.stream()>>>(d_num_of_counts_t.data_ptr<int>(),
(uint*) d_cnts_t.data_ptr<int>(),
M, M,
d_mask_t.data_ptr<byte>());
merge_masks_cuda_kernel<<<num_of_anchors, 256, 0, stream.stream()>>>(d_mask_t.data<byte>(), result.data_ptr<float>(),
M, d_per_anchor_poly_idx.data_ptr<int>(),
num_of_anchors);
return result;
}
|
54c8c7d2e132f2636866f1ed8921d152682978a4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "decrement_dynamic_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *pInts = NULL;
hipMalloc(&pInts, XSIZE*YSIZE);
size_t numInts = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
decrement_dynamic_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, pInts,numInts);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
decrement_dynamic_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, pInts,numInts);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
decrement_dynamic_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, pInts,numInts);
}
hipDeviceSynchronize(); // launches are asynchronous; wait for the timed kernels to finish so usecs measures execution, not enqueue time
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
hipFree(pInts); // free the per-configuration buffer so the sweep does not leak device memory
}
}} | 54c8c7d2e132f2636866f1ed8921d152682978a4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "decrement_dynamic_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *pInts = NULL;
cudaMalloc(&pInts, XSIZE*YSIZE);
size_t numInts = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
decrement_dynamic_kernel<<<gridBlock,threadBlock>>>(pInts,numInts);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
decrement_dynamic_kernel<<<gridBlock,threadBlock>>>(pInts,numInts);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
decrement_dynamic_kernel<<<gridBlock,threadBlock>>>(pInts,numInts);
}
cudaDeviceSynchronize(); // launches are asynchronous; wait for the timed kernels to finish so usecs measures execution, not enqueue time
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(pInts); // free the per-configuration buffer so the sweep does not leak device memory
}
}} |
8faf894f623715bf48f39b44aa982f06e9fd3b44.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//#include <thrust/transform_reduce.h>
#include "tensors/tensor_operators.h"
#include "functional/functional.h"
#include "functional/tensor.h"
#include "tensors/gpu/backend.h"
#include "tensors/gpu/cuda_helpers.h"
#include "3rd_party/reduce_all.h"
namespace marian {
namespace gpu {
struct isnan_test {
__host__ __device__ bool operator()(const float a) const { return isnan(a); }
};
__device__ inline float stableLogit(float x) {
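// numerically stable sigmoid: pick the branch whose exponent is non-positive,
// so expf never overflows for large |x|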
if(x >= 0) {
float z = expf(-x);
return 1.0 / (1.0 + z);
} else {
float z = expf(x);
return z / (1.0 + z);
}
}
bool IsNan(Tensor in) {
// hipSetDevice(in->getDevice().no);
// thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data());
// thrust::device_ptr<float> end
// = thrust::device_pointer_cast(in->data() + in->size());
// return thrust::transform_reduce(
// begin, end, isnan_test(), 0, thrust::plus<bool>());
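// the thrust-based check above is commented out, so this currently always reports no NaNs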
return false;
}
void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) {
hipSetDevice(out->getDevice().no);
int step = 1;
for(int i = 0; i < axis; ++i)
step *= out->shape()[i];
size_t offset1 = 0;
for(int i = 0; i < step; ++i) {
for(auto in : inputs) {
size_t size = in->shape().elements() / step;
size_t offset2 = i * size;
hipMemcpy(out->data() + offset1,
in->data() + offset2,
size * sizeof(float),
hipMemcpyDeviceToDevice);
offset1 += size;
}
}
hipStreamSynchronize(0);
}
__global__ void gInsertCols(float* out,
const float* in,
size_t rows,
size_t cols,
size_t cols_out,
size_t cols_in,
size_t offset_out,
size_t offset_in) {
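// copies a cols-wide column slice between row-major matrices with row strides
// cols_out and cols_in; Concatenate1 and Split1 reuse it with swapped offsets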
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols_out + offset_out;
const float* rowIn = in + j * cols_in + offset_in;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
rowOut[i] = rowIn[i];
}
}
}
}
void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
size_t offset = 0;
int cols_out = out->shape().back();
for(auto in : inputs) {
ABORT_IF(rows != in->shape().elements() / in->shape().back(),
"First dimension must be equal");
int cols_in = in->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols_in);
hipLaunchKernelGGL(( gInsertCols), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), rows, cols_in, cols_out, cols_in, offset, 0);
offset += cols_in;
}
hipStreamSynchronize(0);
}
__global__ void gJoin2(float* out, size_t rowBatch, size_t cols,
const float* in1, size_t inStride1,
const float* in2, size_t inStride2) {
int outStride = inStride1 + inStride2;
int rows = rowBatch * outStride;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
int curBatch = j / outStride;
int curPos = j % outStride;
int jIn1 = (curBatch * inStride1) + curPos;
int jIn2 = (curBatch * inStride2) + curPos - inStride1;
const float* rowIn1 = in1 + jIn1 * cols;
const float* rowIn2 = in2 + jIn2 * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
if(curPos < inStride1)
rowOut[i] = rowIn1[i];
else
rowOut[i] = rowIn2[i];
}
}
}
}
}
void Concatenate2(Tensor out, Tensor in1, Tensor in2) {
hipSetDevice(out->getDevice().no);
size_t rows = out->shape().elements() / out->shape().back();
size_t cols = out->shape().back();
size_t rowStride1 = in1->shape()[-2];
size_t rowStride2 = in2->shape()[-2];
size_t rowBatch = rows / out->shape()[-2];
int blocks = ::min(MAX_BLOCKS, (int)rows);
int threads = ::min(MAX_THREADS, (int)cols);
hipLaunchKernelGGL(( gJoin2), dim3(blocks), dim3(threads), 0, 0, out->data(),
rowBatch,
cols,
in1->data(),
rowStride1,
in2->data(),
rowStride2);
hipStreamSynchronize(0);
}
void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) {
if(ax == out->shape().size() - 1)
Concatenate1(out, inputs);
else if(ax == out->shape().size() - 2 && inputs.size() == 2)
Concatenate2(out, inputs[0], inputs[1]);
else
ConcatCont(out, inputs, ax);
}
void Split1(std::vector<Tensor>& outputs, const Tensor in) {
hipSetDevice(in->getDevice().no);
size_t offset = 0;
int rows = in->shape().elements() / in->shape().back();
int cols_in = in->shape().back();
for(auto out : outputs) {
ABORT_IF(rows != out->shape().elements() / out->shape().back(),
"First dimension must be equal");
int cols_out = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols_out);
hipLaunchKernelGGL(( gInsertCols), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, offset);
offset += cols_out;
}
hipStreamSynchronize(0);
}
void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) {
hipSetDevice(in->getDevice().no);
int step = 1;
for(int i = 0; i < axis; ++i)
step *= in->shape()[i];
size_t offset1 = 0;
for(int i = 0; i < step; ++i) {
for(auto out : outputs) {
size_t size = out->shape().elements() / step;
size_t offset2 = i * size;
hipMemcpyAsync(out->data() + offset2,
in->data() + offset1,
size * sizeof(float),
hipMemcpyDeviceToDevice);
offset1 += size;
}
}
hipStreamSynchronize(0);
}
void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) {
if(ax == in->shape().size() - 1)
Split1(outputs, in);
else
SplitCont(outputs, in, ax);
}
__global__ void gTransposeND(
functional::Tensor<float> out,
const functional::Tensor<float> in,
const functional::Array<int, functional::Shape::size()> permute) {
constexpr size_t N = functional::Shape::size();
functional::Array<int, N> oDims;
functional::Array<int, N> pDims;
int length = out.shape().elements();
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out.shape().dims(index, oDims);
for(int i = 0; i < N; ++i)
pDims[permute[i]] = oDims[i];
out[index] = in[pDims];
}
}
}
__global__
void gTranspose0213(float* out, const float* in,
int rows,
int cols,
int stride1,
int stride2) {
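// specialization for the {0, 2, 1, 3} permutation: output row (z, y, x) is read
// from input row (z, x, y), i.e. the two middle axes are swapped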
int stride = stride1 * stride2;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
int z = j / stride;
int y = (j % stride) / stride1;
int x = (j % stride) % stride1;
int j2 = z * stride + x * stride2 + y;
const float* rowIn = in + j2 * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
rowOut[i] = rowIn[i];
}
}
}
}
void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) {
hipSetDevice(out->getDevice().no);
if(vAxis == std::vector<int>({0, 2, 1, 3})) {
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
int stride1 = out->shape()[-2];
int stride2 = out->shape()[-3];
hipLaunchKernelGGL(( gTranspose0213), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), rows, cols, stride1, stride2);
}
else {
functional::Array<int, functional::Shape::size()> axes;
int diff = functional::Shape::size() - vAxis.size();
for(int i = 0; i < axes.size(); ++i)
if(i < diff)
axes[i] = i;
else
axes[i] = vAxis[i - diff] + diff;
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gTransposeND), dim3(blocks), dim3(threads), 0, 0, out, in, axes);
}
}
__global__ void gSoftmax(float* out,
functional::Shape outShape,
const float* in,
const float* mask,
const functional::Shape maskShape) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
bool broadcast = outShape != maskShape;
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = -CUDA_FLT_MAX; // lowest start value, so any unmasked score replaces it
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
if(mVal && sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
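// tree-reduce the per-thread partial maxima in shared memory to obtain the row max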
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
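// _sum reuses the same shared buffer as _max; this is safe because max was copied
// to a register and a __syncthreads() has intervened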
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
float ex = 0;
if(mVal)
ex = __expf(sp[id] - max);
so[id] = ex;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
so[id] = so[id] / _sum[0];
}
}
}
}
}
void Softmax(Tensor out, Tensor in, Tensor mask) {
hipSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = out->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)m);
int threads = ::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
if(mask)
hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), in->data(), mask->data(), mask->shape());
else
hipLaunchKernelGGL(( gSoftmax), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), in->data(), 0, out->shape());
}
__global__ void gLogSoftmax(float* out,
const functional::Shape outShape,
const float* in) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sm = sp[id] - max;
float ex = __expf(sm);
so[id] = sm;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
so[id] -= __logf(_sum[0]);
}
}
}
}
void LogSoftmax(Tensor out, Tensor in) {
hipSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = out->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)m);
int threads = ::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gLogSoftmax), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), in->data());
}
///////////////////////////////////////////////////////
__global__ void gSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
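// softmax backward (Jacobian-vector product): grad_i += y_i * (adj_i - sum_j y_j * adj_j)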
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += valRow[id] * adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float val = valRow[id] * (adjRow[id] - _sum[0]);
if(val)
gradRow[id] += val;
}
}
}
}
}
void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
hipSetDevice(adj->getDevice().no);
// grad and val are both m-by-k matrices, passed as input.
// A weighted average of each row of grad (according to the weights
// specified in val) is computed and subtracted from Out.
// adj is multiplied for each element to get backward step in autodiff
int m = grad->shape().elements() / grad->shape().back();
int k = grad->shape().back();
int blocks = ::min(MAX_BLOCKS, m);
int threads = ::min(MAX_THREADS, k);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0,
grad->data(), adj->data(), val->data(), m, k);
}
__global__ void gLogSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
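// log-softmax backward: grad_i += adj_i - exp(val_i) * sum_j adj_j, where exp(val_i) recovers the softmax output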
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]);
}
}
}
}
void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
hipSetDevice(adj->getDevice().no);
// grad and val are both m-by-k matrices, passed as input.
// A weighted average of each row of grad (according to the weights
// specified in val) is computed and subtracted from Out.
// adj is multiplied for each element to get backward step in autodiff
int m = grad->shape().elements() / grad->shape().back();
int k = grad->shape().back();
int blocks = ::min(MAX_BLOCKS, m);
int threads = ::min(MAX_THREADS, k);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gLogSoftmaxGrad), dim3(blocks), dim3(threads), shared, 0,
grad->data(), adj->data(), val->data(), m, k);
}
///////////////////////////////////////////////////////
__global__ void gArgmax(float* out,
const float* data,
size_t rows,
size_t cols) {
size_t row = blockIdx.x;
size_t startInd = row * cols;
float maxScore = -99999;
size_t maxInd = 0; // initialize so out[row] is well-defined even if every score is below the sentinel
for(size_t col = 0; col < cols; ++col) {
size_t ind = startInd + col;
float score = data[ind];
if(score > maxScore) {
maxScore = score;
maxInd = col;
}
}
out[row] = maxInd;
}
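// gArgmax assigns one row per block via blockIdx.x and scans the columns
// sequentially without consulting threadIdx, so all threads of a block
// redundantly compute the same maximum; it is only efficient for small
// column counts.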
///////////////////////////////////////////////////////
__global__ void gCopyRows(float* out,
const float* in,
size_t cols,
const size_t* sourceRowIdx,
size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = j;
size_t srcId = sourceRowIdx[j];
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
rowOut[i] = rowIn[i];
}
}
}
}
void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
hipSetDevice(out->getDevice().no);
size_t cols = in->shape().back();
size_t rowsToCopy = indices.size();
int threads = ::min(MAX_THREADS, (int)cols);
int blocks = ::min(MAX_BLOCKS, (int)rowsToCopy);
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, rowsToCopy * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
rowsToCopy * sizeof(size_t),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gCopyRows), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), cols, d_indices, rowsToCopy);
CUDA_CHECK(hipFree(d_indices));
}
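// Illustrative use of CopyRows (hypothetical shapes): gathering rows {2, 0}
// of a 4 x 8 tensor into a pre-allocated 2 x 8 tensor, so that row 0 of
// "out" equals row 2 of "in" and row 1 of "out" equals row 0 of "in":
//
//   std::vector<size_t> idx = {2, 0};
//   CopyRows(out, in, idx);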
__global__ void gPasteRows(float* out,
const float* in,
size_t cols,
const size_t* targetRowIdx,
size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = targetRowIdx[j];
size_t srcId = j;
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
atomicAdd(rowOut + i, rowIn[i]);
}
}
}
}
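// Unlike gCopyRows, gPasteRows scatters rather than gathers: several source
// rows may map to the same entry of targetRowIdx, so writes accumulate via
// atomicAdd instead of plain assignment.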
void PasteRows(Tensor out,
const Tensor in,
const std::vector<size_t>& indices) {
hipSetDevice(out->getDevice().no);
size_t cols = in->shape().back();
size_t rowsToCopy = indices.size();
int threads = ::min(MAX_THREADS, (int)cols);
int blocks = ::min(MAX_BLOCKS, (int)rowsToCopy);
// @TODO: turn into tensor
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, rowsToCopy * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
rowsToCopy * sizeof(size_t),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gPasteRows), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), cols, d_indices, rowsToCopy);
CUDA_CHECK(hipFree(d_indices));
}
/////////////
__global__ void gCopyCols(float* out,
const float* in,
size_t rows,
size_t colsIn,
const size_t* sourceColIdx,
size_t colsOut) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsOut; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < colsOut)
rowOut[i] = rowIn[sourceColIdx[i]];
}
}
}
}
void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
hipSetDevice(out->getDevice().no);
size_t rows = in->shape().elements() / in->shape().back();
size_t cols = in->shape().back();
size_t colsToCopy = indices.size();
int threads = ::min(MAX_THREADS, (int)colsToCopy);
int blocks = ::min(MAX_BLOCKS, (int)rows);
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, colsToCopy * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
colsToCopy * sizeof(size_t),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gCopyCols), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), rows, cols, d_indices, colsToCopy);
CUDA_CHECK(hipFree(d_indices));
}
__global__ void gPasteCols(float* out,
const float* in,
size_t rows,
size_t colsOut,
const size_t* targetColIdx,
size_t colsIn) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsIn; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < colsIn)
rowOut[targetColIdx[i]] = rowIn[i];
}
}
}
}
void PasteCols(Tensor out,
const Tensor in,
const std::vector<size_t>& indices) {
hipSetDevice(out->getDevice().no);
size_t rows = in->shape().elements() / in->shape().back();
size_t cols = in->shape().back();
size_t colsToCopy = indices.size();
int threads = ::min(MAX_THREADS, (int)colsToCopy);
int blocks = ::min(MAX_BLOCKS, (int)rows);
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, colsToCopy * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
colsToCopy * sizeof(size_t),
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gPasteCols), dim3(blocks), dim3(threads), 0, 0,
out->data(), in->data(), rows, cols, d_indices, colsToCopy);
CUDA_CHECK(hipFree(d_indices));
}
__global__ void gSelect(float* out,
functional::Shape outShape,
const float* in,
const functional::Shape inShape,
int axis,
size_t* d_indices) {
int length = outShape.elements();
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
outShape.dims(index, dims);
dims[axis] = d_indices[dims[axis]];
int inIndex = inShape.index(dims);
out[index] = in[inIndex];
}
}
}
__global__ void gInsert(float* out,
functional::Shape outShape,
const float* in,
const functional::Shape inShape,
int axis,
size_t* d_indices) {
int length = inShape.elements();
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
inShape.dims(index, dims);
dims[axis] = d_indices[dims[axis]];
int outIndex = outShape.index(dims);
out[outIndex] = in[index];
}
}
}
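// gSelect and gInsert are the axis-generic gather/scatter pair: gSelect
// reads in[..., d_indices[i], ...] into out, gInsert writes in back into
// out[..., d_indices[i], ...]. Both decompose the linear element index into
// coordinates with Shape::dims(), remap the coordinate on the selected
// axis, and re-linearize with Shape::index().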
void Select(Tensor out,
const Tensor in,
int axis,
const std::vector<size_t>& indices,
Ptr<Allocator> allocator) {
hipSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
auto mp_indices = allocator->alloc<size_t>(indices.size());
CudaCopy(indices.data(),
indices.data() + indices.size(),
mp_indices->data<size_t>());
int axisGPU = axis + functional::Shape::size() - out->shape().size();
hipLaunchKernelGGL(( gSelect), dim3(blocks), dim3(threads), 0, 0, out->data(),
out->shape(),
in->data(),
in->shape(),
axisGPU,
mp_indices->data<size_t>());
allocator->free(mp_indices);
}
void Insert(Tensor out,
const Tensor in,
int axis,
const std::vector<size_t>& indices,
Ptr<Allocator> allocator) {
hipSetDevice(in->getDevice().no);
int length = in->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
auto mp_indices = allocator->alloc<size_t>(indices.size());
CudaCopy(indices.data(),
indices.data() + indices.size(),
mp_indices->data<size_t>());
int axisGPU = axis + functional::Shape::size() - out->shape().size();
hipLaunchKernelGGL(( gInsert), dim3(blocks), dim3(threads), 0, 0, out->data(),
out->shape(),
in->data(),
in->shape(),
axisGPU,
mp_indices->data<size_t>());
allocator->free(mp_indices);
}
__global__ void gGRUFastForward(float* out,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols,
bool final) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowState = state + j * cols;
const float* xWrow = xW + j * cols * 3;
const float* sUrow = sU + j * cols * 3;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float r = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float z = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float h;
if(final)
h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
else
h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
float sOut = (1.0f - z) * h + z * rowState[i]; // renamed to avoid shadowing the kernel parameter "out"
rowOut[i] = m * sOut + (1 - m) * rowState[i];
}
}
}
}
}
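// gGRUFastForward evaluates the fused GRU cell, per element:
//   r  = sigma(xW_r + sU_r + b_r)
//   z  = sigma(xW_z + sU_z + b_z)
//   h  = tanh(xW_h + (sU_h + b_h) * r)   if final (bias inside the gate)
//   h  = tanh(xW_h + sU_h * r + b_h)     otherwise
//   s' = (1 - z) * h + z * s
// xW and sU are laid out as three concatenated column blocks of width
// "cols" (r, z, h); the row mask m blends s' with the previous state for
// padded positions.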
void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gGRUFastForward), dim3(blocks), dim3(threads), 0, 0,
out->data(), // output
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
rows,
cols,
final);
}
__global__ void gGRUFastBackward(float* outState,
float* outXW,
float* outSU,
float* outB,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols,
bool final) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOutState = outState + j * cols;
float* rowOutXW = outXW + j * cols * 3;
float* rowOutSU = outSU + j * cols * 3;
const float* rowState = state + j * cols;
const float* rowXW = xW + j * cols * 3;
const float* rowSU = sU + j * cols * 3;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + cols;
int l = i + 2 * cols;
float r = stableLogit(rowXW[i] + rowSU[i] + b[i]);
float z = stableLogit(rowXW[k] + rowSU[k] + b[k]);
float h;
if(final)
h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r);
else
h = tanhf(rowXW[l] + rowSU[l] * r + b[l]);
float adj = rowAdj[i];
float t = (1 - z) * (1 - h * h);
// df/ds
if(outState)
rowOutState[i] += (m * z - m + 1) * adj;
// df/d(xW_r) ...
float dfdxW_r = m * r * (1 - r) * t * adj;
if(final)
dfdxW_r *= rowSU[l] + b[l];
else
dfdxW_r *= rowSU[l];
if(outXW)
rowOutXW[i] += dfdxW_r;
if(outSU)
rowOutSU[i] += dfdxW_r;
if(outB)
atomicAdd(outB + i, dfdxW_r);
// df/d(xW_z) ...
float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj;
if(outXW)
rowOutXW[k] += dfdxW_z;
if(outSU)
rowOutSU[k] += dfdxW_z;
if(outB)
atomicAdd(outB + k, dfdxW_z);
// df/d(xW_x) ...
float dfdxW_x = m * t * adj;
if(outXW)
rowOutXW[l] += dfdxW_x;
if(outSU)
rowOutSU[l] += dfdxW_x * r;
if(outB)
if(final)
atomicAdd(outB + l, dfdxW_x * r);
else
atomicAdd(outB + l, dfdxW_x);
}
}
}
}
}
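// The backward kernel re-runs the forward gating computation to recover r,
// z and h, then accumulates the chain-rule terms into whichever of the four
// output gradients (state, xW, sU, b) is non-null. The bias gradient is
// shared across rows, hence the atomicAdd into outB.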
void GRUFastBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj,
bool final) {
hipSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gGRUFastBackward), dim3(blocks), dim3(threads), 0, 0,
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
adj->data(),
rows,
cols,
final);
}
__global__ void gCrossEntropyPick(float* out,
const functional::Shape outShape,
const float* in,
const functional::Shape inShape,
const float* pick) {
int rows = inShape.elements() / inShape.back();
int cols = inShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += __expf(sp[id] - max);
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id == (int)pick[j]) {
out[j] = __logf(_sum[0]) - sp[id] + max;
}
}
}
}
}
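// Numerically, gCrossEntropyPick computes the negative log-likelihood of
// the picked class with the usual max-shift:
//   out_j = log(sum_k exp(x_k - max)) - (x_pick - max) = -log softmax(x)_pick
// One block handles one row: a shared-memory max reduction, a
// sum-of-exponentials reduction, and finally the thread owning the picked
// column writes out[j].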
void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) {
hipSetDevice(out->getDevice().no);
int rows = in->shape().elements() / in->shape().back();
int cols = in->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)rows);
int threads = ::min(MAX_THREADS, (int)cols);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gCrossEntropyPick), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), in->data(), in->shape(), pick->data());
}
__global__ void gCrossEntropyPickBackward(float* out,
const functional::Shape outShape,
const float* adj,
const float* in,
const float* pick) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
float* so = out + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = __expf(sp[id] - max);
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sub = (float)(id == (int)pick[j]);
so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub);
}
}
}
}
}
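// The matching backward rule is
//   dJ/dx_i = adj_j * (softmax(x)_i - [i == pick_j]),
// applied after recomputing the row's maximum and exponential sum.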
void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)rows);
int threads = ::min(MAX_THREADS, (int)cols);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gCrossEntropyPickBackward), dim3(blocks), dim3(threads), shared, 0,
out->data(), out->shape(), adj->data(), a->data(), pick->data());
}
float L2Norm(Tensor in) {
hipSetDevice(in->getDevice().no);
int size = in->shape().elements();
int threads = ::min(MAX_THREADS, size);
int blocks = ::min(MAX_BLOCKS, size / threads + (size % threads != 0));
uint8_t* data;
hipMalloc(&data, blocks * sizeof(float));
Tensor out(new TensorBase(New<MemoryPiece>(data, blocks * sizeof(float)),
{1, blocks},
in->getBackend()));
using namespace functional;
ReduceAll(_1 * _1, out, in);
float dataCpu = sqrtf(out->get(0));
out.reset();
hipFree(data);
return dataCpu;
}
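// L2Norm computes sqrt(sum_i x_i^2): ReduceAll(_1 * _1, out, in) squares and
// reduces the elements on the device into the temporary {1, blocks} tensor,
// and the final square root is taken on the host from out->get(0).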
__global__ void gAtt(float* out,
const float* va,
const float* ctx,
const float* state,
int m, // total rows (batch x time x beam)
int k, // depth
int b, // batch size
int t // time of ctx
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* vaRow = va;
const float* ctxRow = ctx + (j % (b * t)) * cols;
const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols;
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = ctxRow[id] + stateRow[id];
float ex = tanhf(z) * vaRow[id];
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
out[j] = _sum[0];
__syncthreads();
}
}
}
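// gAtt computes additive (Bahdanau-style) attention energies: for each
// output position j,
//   out_j = sum_i va_i * tanh(ctx_i + state_i),
// where the context row is selected by j % (b * t) and the state row is
// replicated across the time dimension; the dot product is tree-reduced in
// shared memory.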
void Att(Tensor out, Tensor va, Tensor context, Tensor state) {
hipSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = context->shape()[-1];
size_t b = context->shape()[-2];
size_t t = context->shape()[-3];
int blocks = ::min(MAX_BLOCKS, (int)m);
int threads = ::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
hipLaunchKernelGGL(( gAtt), dim3(blocks), dim3(threads), shared, 0,
out->data(), va->data(), context->data(), state->data(), m, k, b, t);
}
__global__ void gAttBack(float* gVa,
float* gContext,
float* gState,
const float* va,
const float* context,
const float* state,
const float* adj,
int m, // rows
int k, // cols
int n // batch size
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < m; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* gcRow = gContext + j * cols;
float* gsRow = gState + (j % n) * cols;
const float* cRow = context + j * cols;
const float* sRow = state + (j % n) * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = cRow[id] + sRow[id];
float t = tanhf(z);
float r = va[id] * (1.f - t * t);
gcRow[id] += r * adj[j];
gsRow[id] += r * adj[j];
atomicAdd(gVa + id, t * adj[j]);
}
}
}
}
}
void AttBack(Tensor gVa,
Tensor gContext,
Tensor gState,
Tensor va,
Tensor context,
Tensor state,
Tensor adj) {
hipSetDevice(adj->getDevice().no);
size_t m = adj->shape().elements() / adj->shape()[-1];
size_t k = context->shape()[-1];
size_t n = context->shape()[-2];
int blocks = ::min(MAX_BLOCKS, (int)n);
int threads = ::min(MAX_THREADS, (int)k);
hipLaunchKernelGGL(( gAttBack), dim3(blocks), dim3(threads), 0, 0, gVa->data(),
gContext->data(),
gState->data(),
va->data(),
context->data(),
state->data(),
adj->data(),
m,
k,
n);
}
__global__ void gLNormalization(float* out,
const float* in,
const float* alpha,
const float* beta,
int rows,
int cols,
float eps = 1e-9) {
extern __shared__ float _share[];
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0f;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = _sum[0] / cols;
__syncthreads();
float* _sqSum = _share + blockDim.x;
_sqSum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = sp[id] - mean;
_sqSum[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (_sqSum[0] / cols));
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float t = alpha[id] * ((sp[id] - mean) / sigma);
if(beta != nullptr)
t += beta[id];
so[id] = t;
}
}
}
}
}
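// gLNormalization applies, per row,
//   y = alpha * (x - mu) / sqrt(eps + var) + beta
// with mu and var the row mean and (biased) variance, each obtained by a
// shared-memory tree reduction. Note that eps sits inside the square root
// rather than being added to sigma.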
void LayerNormalization(Tensor out,
Tensor in,
Tensor gamma,
Tensor beta,
float eps) {
hipSetDevice(out->getDevice().no);
int rows = in->shape().elements() / in->shape().back();
int cols = in->shape().back();
int blocks = ::min(MAX_BLOCKS, (int)rows);
int threads = ::min(MAX_THREADS, (int)cols);
int shared = 2 * threads * sizeof(float);
hipLaunchKernelGGL(( gLNormalization), dim3(blocks), dim3(threads), shared, 0, out->data(),
in->data(),
gamma->data(),
beta ? beta->data() : nullptr,
rows,
cols,
eps);
}
__global__ void gLayerNormalizationGrad(float* gradX,
float* gradGamma,
float* gradBeta,
float* adj,
float* y,
float* x,
float* gamma,
float* beta,
int rows,
int cols,
float eps = 1e-9) {
extern __shared__ float shared[];
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* sum_adj = shared;
float* sum_adj_x = shared + blockDim.x;
float* sum_x = shared + 2 * blockDim.x;
float* sum_sqr = shared + 3 * blockDim.x;
const float* xRow = x + j * cols;
const float* yRow = y + j * cols;
const float* adjRow = adj + j * cols;
float* gradXRow = gradX + j * cols;
sum_x[threadIdx.x] = 0.0f;
sum_adj[threadIdx.x] = 0.0f;
sum_adj_x[threadIdx.x] = 0.0f;
sum_sqr[threadIdx.x] = 0.0f;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
sum_x[threadIdx.x] += xRow[id];
sum_adj_x[threadIdx.x]
+= adjRow[id] * (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
sum_adj[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
sum_x[threadIdx.x] += sum_x[threadIdx.x + skip];
sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip];
sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = sum_x[0] / cols;
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = xRow[id] - mean;
sum_sqr[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (sum_sqr[0] / cols));
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float grad_x = 0.0f;
float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
grad_x += cols * adjRow[id];
grad_x -= sum_adj[0];
grad_x -= sum_adj_x[0] * x_hat;
grad_x /= (cols * sigma);
float valX = gamma[id] * grad_x;
float sign = (0.f < valX) - (valX < 0.f);
valX = fabs(valX) > 1000 ? sign * 1000 : valX;
gradXRow[id] += valX;
atomicAdd(gradGamma + id, adjRow[id] * x_hat);
if(beta) {
atomicAdd(gradBeta + id, adjRow[id]);
}
}
}
}
}
}
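// With x_hat = (x - mu) / sigma, recovered from the stored output as
// (y - beta) / gamma, the kernel uses the standard layer-norm input
// gradient
//   dJ/dx_i = gamma_i / (N * sigma) *
//             (N * adj_i - sum_k adj_k - x_hat_i * sum_k adj_k * x_hat_k),
// clips the result to [-1000, 1000], and reduces the gamma/beta gradients
// across rows with atomicAdd.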
void LayerNormalizationGrad(Tensor gradX,
Tensor gradGamma,
Tensor gradBeta,
Tensor adj,
Tensor y,
Tensor x,
Tensor gamma,
Tensor beta,
float eps) {
hipSetDevice(adj->getDevice().no);
int rows = y->shape().elements() / y->shape()[-1];
int cols = y->shape()[-1];
int threads = ::min(MAX_THREADS, cols);
int blocks = ::min(MAX_BLOCKS, rows);
int shared = sizeof(float) * threads * 4;
hipLaunchKernelGGL(( gLayerNormalizationGrad), dim3(blocks), dim3(threads), shared, 0,
gradX->data(),
gradGamma->data(),
(gradBeta) ? gradBeta->data() : nullptr,
adj->data(),
y->data(),
x->data(),
gamma->data(),
(beta) ? beta->data() : nullptr,
rows,
cols,
eps);
}
__global__ void gShift(float* out, const float* in, int length, int offset) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
if(index - offset < 0 || index - offset >= length)
out[index] = 0;
else
out[index] = in[index - offset];
}
}
}
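// gShift treats the tensor as a flat array: position "index" reads from
// "index - offset", and reads that fall outside [0, length) are replaced by
// zero, so a positive offset moves the contents toward higher indices and
// zero-pads the front.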
void Shift(Tensor out, Tensor in, marian::Shape shift, bool invert) {
ABORT_IF(in->shape().size() != shift.size(), "bad dimensions");
// BUGBUG: This can only shift along the first axis. Shifting, e.g., along the last axis cannot be implemented this way.
int offset = 0;
for(int i = 0; i < shift.size(); ++i)
offset += in->shape().stride(i) * shift[i];
if(invert)
offset = -offset;
hipSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gShift), dim3(blocks), dim3(threads), 0, 0, out->data(), in->data(), length, offset);
}
__global__ void gSetSparse(float* out,
const size_t* indices,
const float* values,
int length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out[indices[index]] = values[index];
}
}
}
void SetSparse(float* out,
const std::vector<size_t>& indices,
const std::vector<float>& values) {
int length = indices.size();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
size_t* d_indices;
CUDA_CHECK(hipMalloc(&d_indices, length * sizeof(size_t)));
CUDA_CHECK(hipMemcpy(d_indices,
indices.data(),
length * sizeof(size_t),
hipMemcpyHostToDevice));
float* d_values;
CUDA_CHECK(hipMalloc(&d_values, length * sizeof(float)));
CUDA_CHECK(hipMemcpy(
d_values, values.data(), length * sizeof(float), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( gSetSparse), dim3(blocks), dim3(threads), 0, 0, out, d_indices, d_values, length);
hipFree(d_indices);
hipFree(d_values);
}
/******************************************************************************/
__global__ void gLSTMCellForward(float* out,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
float cout = gf * rowCell[i] + gi * gc;
rowOut[i] = m * cout + (1 - m) * rowCell[i];
}
}
}
}
}
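// gLSTMCellForward evaluates the cell-state update
//   gf = sigma(xW_f + sU_f + b_f)
//   gi = sigma(xW_i + sU_i + b_i)
//   gc = tanh (xW_c + sU_c + b_c)
//   c' = gf * c + gi * gc
// with xW and sU laid out as four concatenated column blocks (f, i, c, o);
// the fourth (output-gate) block is consumed separately by
// gLSTMOutputForward below.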
void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gLSTMCellForward), dim3(blocks), dim3(threads), 0, 0,
out->data(), // output
inputs[0]->data(), // cell state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
rows,
cols);
}
__global__ void gLSTMOutputForward(float* out,
const float* cell,
const float* xW,
const float* sU,
const float* b,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
rowOut[i] = go * tanhf(rowCell[i]);
}
}
}
}
}
void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) {
hipSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gLSTMOutputForward), dim3(blocks), dim3(threads), 0, 0, out->data(), // output
inputs[0]->data(), // cell state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
rows,
cols);
}
__global__ void gLSTMCellBackward(float* outCell,
float* outXW,
float* outSU,
float* outB,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += (m * gf - m + 1) * adj;
// dc/d(b_f) = dc/d(xW_f) ...
float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj;
if(outXW)
rowOutXW[i] += dcdxf;
if(outSU)
rowOutSU[i] += dcdxf;
if(outB)
atomicAdd(outB + i, dcdxf);
// dc/d(b_i) ...
float dcdb_i = m * gc * gi * (1 - gi) * adj;
if(outXW)
rowOutXW[k] += dcdb_i;
if(outSU)
rowOutSU[k] += dcdb_i;
if(outB)
atomicAdd(outB + k, dcdb_i);
// dc/d(b_c) ...
float dcdxc = m * gi * (1 - gc * gc) * adj;
if(outXW)
rowOutXW[l] += dcdxc;
if(outSU)
rowOutSU[l] += dcdxc;
if(outB)
atomicAdd(outB + l, dcdxc);
}
}
}
}
}
void LSTMCellBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj) {
hipSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gLSTMCellBackward), dim3(blocks), dim3(threads), 0, 0,
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
adj->data(),
rows,
cols);
}
__global__ void gLSTMOutputBackward(float* outCell,
float* outXW,
float* outSU,
float* outB,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* adj,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
float t = tanhf(rowCell[i]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += go * (1 - t * t) * adj;
// dc/d(b_o) = dc/d(xW_f) ...
float dcdxo = t * go * (1 - go) * adj;
if(outXW)
rowOutXW[k] += dcdxo;
if(outSU)
rowOutSU[k] += dcdxo;
if(outB)
atomicAdd(outB + k, dcdxo);
}
}
}
}
}
void LSTMOutputBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj) {
hipSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = ::min(MAX_BLOCKS, rows);
int threads = ::min(MAX_THREADS, cols);
hipLaunchKernelGGL(( gLSTMOutputBackward), dim3(blocks), dim3(threads), 0, 0,
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
adj->data(),
rows,
cols);
}
__global__ void gHighwayForward(float* out,
const float* in1,
const float* in2,
const float* t,
size_t length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
float sigma = stableLogit(t[index]);
out[index] = in1[index] * sigma + in2[index] * (1.f - sigma);
}
}
}
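// Highway connection: with gate sigma = logit(t),
//   y = sigma * in1 + (1 - sigma) * in2.
// The backward kernel below distributes an adjoint accordingly:
//   d/d(in1) = sigma * adj
//   d/d(in2) = (1 - sigma) * adj
//   d/dt     = sigma * (1 - sigma) * (in1 - in2) * adj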
void HighwayForward(Tensor out,
const Tensor in1,
const Tensor in2,
const Tensor t) {
hipSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gHighwayForward), dim3(blocks), dim3(threads), 0, 0,
out->data(), in1->data(), in2->data(), t->data(), length);
}
__global__ void gHighwayBackward(float* out1,
float* out2,
float* outt,
const float* in1,
const float* in2,
const float* t,
const float* adj,
size_t length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
float sigma = stableLogit(t[index]);
out1[index] = sigma * adj[index];
out2[index] = (1.f - sigma) * adj[index];
outt[index]
= sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index];
}
}
}
void HighwayBackward(Tensor out1,
Tensor out2,
Tensor outt,
const Tensor in1,
const Tensor in2,
const Tensor t,
const Tensor adj) {
hipSetDevice(out1->getDevice().no);
int length = out1->shape().elements();
int threads = ::min(MAX_THREADS, length);
int blocks = ::min(MAX_BLOCKS, length / threads + (length % threads != 0));
hipLaunchKernelGGL(( gHighwayBackward), dim3(blocks), dim3(threads), 0, 0, out1->data(),
out2->data(),
outt->data(),
in1->data(),
in2->data(),
t->data(),
adj->data(),
length);
}
__global__ void gMaxPoolingForward(float* out,
int outRows,
int outCols,
float* in,
int inRows,
int inCols,
float* mask,
int numKernels,
int maskCols,
int width,
int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= outRows * outCols)
return;
int rowId = tid / outRows;
int colId = tid % outRows;
float* b = in + (rowId * inCols) + (colId * width);
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
if(colId == outRows - 1) {
width = lastWidth;
}
float currentMax = b[0] * localMask[0];
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > currentMax) {
currentMax = b[i] * localMask[i];
}
}
out[rowId + (colId * outCols)] = currentMax;
}
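// Each thread handles one pooling window: it scans "width" consecutive
// masked input elements ("lastWidth" for the final window of a row) and
// writes their maximum. The backward kernel repeats the scan to find the
// argmax and routes the adjoint to that single input element.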
void PoolingWithMaskingForward(Tensor out,
Tensor in,
Tensor mask,
int width,
bool isEven) {
int n = out->shape().elements();
int threads = ::min(n, MAX_THREADS);
int blocks = n / threads + (n % threads != 0);
auto& inShape = in->shape();
int inRows = inShape[0] * inShape[1];
int inCols = inShape[2];
auto& outShape = out->shape();
int outRows = outShape[2];
int outCols = outShape[0] * outShape[1];
int lastWidth
= ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
hipLaunchKernelGGL(( gMaxPoolingForward), dim3(blocks), dim3(threads), 0, 0, out->data(),
outRows,
outCols,
in->data(),
inRows,
inCols,
mask->data(),
outShape[1],
mask->shape()[2],
width,
lastWidth);
}
__global__ void gMaxPoolingBackward(float* adj,
int adjRows,
int adjCols,
float* in,
float* adjIn,
int inRows,
int inCols,
float* mask,
int numKernels,
int maskCols,
int width,
int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= adjRows * adjCols)
return;
int rowId = tid / adjRows;
int colId = tid % adjRows;
float* b = in + (rowId * inCols) + (colId * width);
if(colId == adjRows - 1) {
width = lastWidth;
}
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
size_t currentMaxIdx = 0;
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) {
currentMaxIdx = i;
}
}
adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx]
+= adj[rowId + (colId * adjCols)];
}
void PoolingWithMaskingBackward(Tensor adj,
Tensor adjIn,
Tensor in,
Tensor mask,
int width,
bool isEven) {
int n = adj->shape().elements();
int threads = ::min(n, 512);
int blocks = n / threads + (n % threads != 0);
auto& inShape = in->shape();
int inRows = inShape[0] * inShape[1];
int inCols = inShape[2];
auto& adjShape = adj->shape();
int adjRows = adjShape[2];
int adjCols = adjShape[0] * adjShape[1];
int lastWidth
= ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
hipLaunchKernelGGL(( gMaxPoolingBackward), dim3(blocks), dim3(threads), 0, 0, adj->data(),
adjRows,
adjCols,
in->data(),
adjIn->data(),
inRows,
inCols,
mask->data(),
adjShape[1],
mask->shape()[2],
width,
lastWidth);
}
}
} // namespace marian
8faf894f623715bf48f39b44aa982f06e9fd3b44.cu
//#include <thrust/transform_reduce.h>
#include "tensors/tensor_operators.h"
#include "functional/functional.h"
#include "functional/tensor.h"
#include "tensors/gpu/backend.h"
#include "tensors/gpu/cuda_helpers.h"
#include "3rd_party/reduce_all.h"
namespace marian {
namespace gpu {
struct isnan_test {
__host__ __device__ bool operator()(const float a) const { return isnan(a); }
};
__device__ inline float stableLogit(float x) {
if(x >= 0) {
float z = expf(-x);
return 1.0 / (1.0 + z);
} else {
float z = expf(x);
return z / (1.0 + z);
}
}
bool IsNan(Tensor in) {
// cudaSetDevice(in->getDevice().no);
// thrust::device_ptr<float> begin = thrust::device_pointer_cast(in->data());
// thrust::device_ptr<float> end
// = thrust::device_pointer_cast(in->data() + in->size());
// return thrust::transform_reduce(
// begin, end, isnan_test(), 0, thrust::plus<bool>());
return false;
}
void ConcatCont(Tensor out, const std::vector<Tensor>& inputs, int axis) {
cudaSetDevice(out->getDevice().no);
int step = 1;
for(int i = 0; i < axis; ++i)
step *= out->shape()[i];
size_t offset1 = 0;
for(int i = 0; i < step; ++i) {
for(auto in : inputs) {
size_t size = in->shape().elements() / step;
size_t offset2 = i * size;
cudaMemcpy(out->data() + offset1,
in->data() + offset2,
size * sizeof(float),
cudaMemcpyDeviceToDevice);
offset1 += size;
}
}
cudaStreamSynchronize(0);
}
__global__ void gInsertCols(float* out,
const float* in,
size_t rows,
size_t cols,
size_t cols_out,
size_t cols_in,
size_t offset_out,
size_t offset_in) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols_out + offset_out;
const float* rowIn = in + j * cols_in + offset_in;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
rowOut[i] = rowIn[i];
}
}
}
}
void Concatenate1(Tensor out, const std::vector<Tensor>& inputs) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
size_t offset = 0;
int cols_out = out->shape().back();
for(auto in : inputs) {
ABORT_IF(rows != in->shape().elements() / in->shape().back(),
"First dimension must be equal");
int cols_in = in->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols_in);
gInsertCols<<<blocks, threads>>>(
out->data(), in->data(), rows, cols_in, cols_out, cols_in, offset, 0);
offset += cols_in;
}
cudaStreamSynchronize(0);
}
__global__ void gJoin2(float* out, size_t rowBatch, size_t cols,
const float* in1, size_t inStride1,
const float* in2, size_t inStride2) {
int outStride = inStride1 + inStride2;
int rows = rowBatch * outStride;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
int curBatch = j / outStride;
int curPos = j % outStride;
int jIn1 = (curBatch * inStride1) + curPos;
int jIn2 = (curBatch * inStride2) + curPos - inStride1;
const float* rowIn1 = in1 + jIn1 * cols;
const float* rowIn2 = in2 + jIn2 * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
if(curPos < inStride1)
rowOut[i] = rowIn1[i];
else
rowOut[i] = rowIn2[i];
}
}
}
}
}
void Concatenate2(Tensor out, Tensor in1, Tensor in2) {
cudaSetDevice(out->getDevice().no);
size_t rows = out->shape().elements() / out->shape().back();
size_t cols = out->shape().back();
size_t rowStride1 = in1->shape()[-2];
size_t rowStride2 = in2->shape()[-2];
size_t rowBatch = rows / out->shape()[-2];
int blocks = std::min(MAX_BLOCKS, (int)rows);
int threads = std::min(MAX_THREADS, (int)cols);
gJoin2<<<blocks, threads>>>(out->data(),
rowBatch,
cols,
in1->data(),
rowStride1,
in2->data(),
rowStride2);
cudaStreamSynchronize(0);
}
void Concatenate(Tensor out, const std::vector<Tensor>& inputs, int ax) {
if(ax == out->shape().size() - 1)
Concatenate1(out, inputs);
else if(ax == out->shape().size() - 2 && inputs.size() == 2)
Concatenate2(out, inputs[0], inputs[1]);
else
ConcatCont(out, inputs, ax);
}
void Split1(std::vector<Tensor>& outputs, const Tensor in) {
cudaSetDevice(in->getDevice().no);
size_t offset = 0;
int rows = in->shape().elements() / in->shape().back();
int cols_in = in->shape().back();
for(auto out : outputs) {
ABORT_IF(rows != out->shape().elements() / out->shape().back(),
"First dimension must be equal");
int cols_out = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols_out);
gInsertCols<<<blocks, threads>>>(
out->data(), in->data(), rows, cols_out, cols_out, cols_in, 0, offset);
offset += cols_out;
}
cudaStreamSynchronize(0);
}
void SplitCont(std::vector<Tensor>& outputs, const Tensor in, int axis) {
cudaSetDevice(in->getDevice().no);
int step = 1;
for(int i = 0; i < axis; ++i)
step *= in->shape()[i];
size_t offset1 = 0;
for(int i = 0; i < step; ++i) {
for(auto out : outputs) {
size_t size = out->shape().elements() / step;
size_t offset2 = i * size;
cudaMemcpyAsync(out->data() + offset2,
in->data() + offset1,
size * sizeof(float),
cudaMemcpyDeviceToDevice);
offset1 += size;
}
}
cudaStreamSynchronize(0);
}
void Deconcatenate(std::vector<Tensor>& outputs, const Tensor in, int ax) {
if(ax == in->shape().size() - 1)
Split1(outputs, in);
else
SplitCont(outputs, in, ax);
}
__global__ void gTransposeND(
functional::Tensor<float> out,
const functional::Tensor<float> in,
const functional::Array<int, functional::Shape::size()> permute) {
constexpr size_t N = functional::Shape::size();
functional::Array<int, N> oDims;
functional::Array<int, N> pDims;
int length = out.shape().elements();
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out.shape().dims(index, oDims);
for(int i = 0; i < N; ++i)
pDims[permute[i]] = oDims[i];
out[index] = in[pDims];
}
}
}
__global__
void gTranspose0213(float* out, const float* in,
int rows,
int cols,
int stride1,
int stride2) {
int stride = stride1 * stride2;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
int z = j / stride;
int y = (j % stride) / stride1;
int x = (j % stride) % stride1;
int j2 = z * stride + x * stride2 + y;
const float* rowIn = in + j2 * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
rowOut[i] = rowIn[i];
}
}
}
}
void TransposeND(Tensor out, Tensor in, const std::vector<int>& vAxis) {
cudaSetDevice(out->getDevice().no);
if(vAxis == std::vector<int>({0, 2, 1, 3})) {
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
int stride1 = out->shape()[-2];
int stride2 = out->shape()[-3];
gTranspose0213<<<blocks, threads>>>(out->data(), in->data(), rows, cols, stride1, stride2);
}
else {
functional::Array<int, functional::Shape::size()> axes;
int diff = functional::Shape::size() - vAxis.size();
for(int i = 0; i < axes.size(); ++i)
if(i < diff)
axes[i] = i;
else
axes[i] = vAxis[i - diff] + diff;
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gTransposeND<<<blocks, threads>>>(out, in, axes);
}
}
__global__ void gSoftmax(float* out,
functional::Shape outShape,
const float* in,
const float* mask,
const functional::Shape maskShape) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
bool broadcast = outShape != maskShape;
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = -CUDA_FLT_MAX; // mask
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
if(mVal && sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float mVal = 1.f;
if(mask) {
int mIndex = id + j * cols;
if(broadcast) {
outShape.dims(mIndex, dims);
mIndex = maskShape.bindex(dims);
}
mVal = mask[mIndex];
}
float ex = 0;
if(mVal)
ex = __expf(sp[id] - max);
so[id] = ex;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
so[id] = so[id] / _sum[0];
}
}
}
}
}
void Softmax(Tensor out, Tensor in, Tensor mask) {
cudaSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = out->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)m);
int threads = std::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
if(mask)
gSoftmax<<<blocks, threads, shared>>>(
out->data(), out->shape(), in->data(), mask->data(), mask->shape());
else
gSoftmax<<<blocks, threads, shared>>>(
out->data(), out->shape(), in->data(), 0, out->shape());
}
__global__ void gLogSoftmax(float* out,
const functional::Shape outShape,
const float* in) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sm = sp[id] - max;
float ex = __expf(sm);
so[id] = sm;
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
so[id] -= __logf(_sum[0]);
}
}
}
}
void LogSoftmax(Tensor out, Tensor in) {
cudaSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = out->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)m);
int threads = std::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
gLogSoftmax<<<blocks, threads, shared>>>(
out->data(), out->shape(), in->data());
}
///////////////////////////////////////////////////////
__global__ void gSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += valRow[id] * adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float val = valRow[id] * (adjRow[id] - _sum[0]);
if(val)
gradRow[id] += val;
}
}
}
}
}
void SoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
cudaSetDevice(adj->getDevice().no);
// grad, adj and val are all m-by-k matrices.
// For each row, the sum of adj weighted by the softmax output stored in
// val is subtracted from adj, the difference is scaled elementwise by val,
// and the result is accumulated into grad as the autodiff backward step.
int m = grad->shape().elements() / grad->shape().back();
int k = grad->shape().back();
int blocks = std::min(MAX_BLOCKS, m);
int threads = std::min(MAX_THREADS, k);
int shared = sizeof(float) * threads * 2;
gSoftmaxGrad<<<blocks, threads, shared>>>(
grad->data(), adj->data(), val->data(), m, k);
}
__global__ void gLogSoftmaxGrad(float* grad,
const float* adj,
const float* val,
const int rows,
const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]);
}
}
}
}
void LogSoftmaxGrad(Tensor grad, Tensor adj, Tensor val) {
cudaSetDevice(adj->getDevice().no);
// grad, adj and val are all m-by-k matrices; val holds the forward
// log-softmax output. For each row, the sum of adj is computed, and
// adj - exp(val) * sum(adj) is accumulated into grad as the autodiff
// backward step.
int m = grad->shape().elements() / grad->shape().back();
int k = grad->shape().back();
int blocks = std::min(MAX_BLOCKS, m);
int threads = std::min(MAX_THREADS, k);
int shared = sizeof(float) * threads * 2;
gLogSoftmaxGrad<<<blocks, threads, shared>>>(
grad->data(), adj->data(), val->data(), m, k);
}
///////////////////////////////////////////////////////
__global__ void gArgmax(float* out,
const float* data,
size_t rows,
size_t cols) {
size_t row = blockIdx.x;
size_t startInd = row * cols;
float maxScore = -99999;
size_t maxInd = 0; // initialize so out[row] is well-defined for every input
for(size_t col = 0; col < cols; ++col) {
size_t ind = startInd + col;
float score = data[ind];
if(score > maxScore) {
maxScore = score;
maxInd = col;
}
}
out[row] = maxInd;
}
///////////////////////////////////////////////////////
__global__ void gCopyRows(float* out,
const float* in,
size_t cols,
const size_t* sourceRowIdx,
size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = j;
size_t srcId = sourceRowIdx[j];
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
rowOut[i] = rowIn[i];
}
}
}
}
void CopyRows(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
cudaSetDevice(out->getDevice().no);
size_t cols = in->shape().back();
size_t rowsToCopy = indices.size();
int threads = std::min(MAX_THREADS, (int)cols);
int blocks = std::min(MAX_BLOCKS, (int)rowsToCopy);
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, rowsToCopy * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
rowsToCopy * sizeof(size_t),
cudaMemcpyHostToDevice));
gCopyRows<<<blocks, threads>>>(
out->data(), in->data(), cols, d_indices, rowsToCopy);
CUDA_CHECK(cudaFree(d_indices));
}
__global__ void gPasteRows(float* out,
const float* in,
size_t cols,
const size_t* targetRowIdx,
size_t rows) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
size_t dstId = targetRowIdx[j];
size_t srcId = j;
float* rowOut = out + dstId * cols;
const float* rowIn = in + srcId * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols)
atomicAdd(rowOut + i, rowIn[i]);
}
}
}
}
void PasteRows(Tensor out,
const Tensor in,
const std::vector<size_t>& indices) {
cudaSetDevice(out->getDevice().no);
size_t cols = in->shape().back();
size_t rowsToCopy = indices.size();
int threads = std::min(MAX_THREADS, (int)cols);
int blocks = std::min(MAX_BLOCKS, (int)rowsToCopy);
// @TODO: turn into tensor
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, rowsToCopy * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
rowsToCopy * sizeof(size_t),
cudaMemcpyHostToDevice));
gPasteRows<<<blocks, threads>>>(
out->data(), in->data(), cols, d_indices, rowsToCopy);
CUDA_CHECK(cudaFree(d_indices));
}
/////////////
__global__ void gCopyCols(float* out,
const float* in,
size_t rows,
size_t colsIn,
const size_t* sourceColIdx,
size_t colsOut) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsOut; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < colsOut)
rowOut[i] = rowIn[sourceColIdx[i]];
}
}
}
}
void CopyCols(Tensor out, const Tensor in, const std::vector<size_t>& indices) {
cudaSetDevice(out->getDevice().no);
size_t rows = in->shape().elements() / in->shape().back();
size_t cols = in->shape().back();
size_t colsToCopy = indices.size();
int threads = std::min(MAX_THREADS, (int)colsToCopy);
int blocks = std::min(MAX_BLOCKS, (int)rows);
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, colsToCopy * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
colsToCopy * sizeof(size_t),
cudaMemcpyHostToDevice));
gCopyCols<<<blocks, threads>>>(
out->data(), in->data(), rows, cols, d_indices, colsToCopy);
CUDA_CHECK(cudaFree(d_indices));
}
__global__ void gPasteCols(float* out,
const float* in,
size_t rows,
size_t colsOut,
const size_t* targetColIdx,
size_t colsIn) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* rowIn = in + j * colsIn;
float* rowOut = out + j * colsOut;
for(int tid = 0; tid < colsIn; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < colsIn)
rowOut[targetColIdx[i]] = rowIn[i];
}
}
}
}
void PasteCols(Tensor out,
const Tensor in,
const std::vector<size_t>& indices) {
cudaSetDevice(out->getDevice().no);
size_t rows = in->shape().elements() / in->shape().back();
size_t cols = in->shape().back();
size_t colsToCopy = indices.size();
int threads = std::min(MAX_THREADS, (int)colsToCopy);
int blocks = std::min(MAX_BLOCKS, (int)rows);
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, colsToCopy * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
colsToCopy * sizeof(size_t),
cudaMemcpyHostToDevice));
gPasteCols<<<blocks, threads>>>(
out->data(), in->data(), rows, cols, d_indices, colsToCopy);
CUDA_CHECK(cudaFree(d_indices));
}
__global__ void gSelect(float* out,
functional::Shape outShape,
const float* in,
const functional::Shape inShape,
int axis,
size_t* d_indices) {
int length = outShape.elements();
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
outShape.dims(index, dims);
dims[axis] = d_indices[dims[axis]];
int inIndex = inShape.index(dims);
out[index] = in[inIndex];
}
}
}
__global__ void gInsert(float* out,
functional::Shape outShape,
const float* in,
const functional::Shape inShape,
int axis,
size_t* d_indices) {
int length = inShape.elements();
functional::Array<int, functional::Shape::size()> dims;
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
inShape.dims(index, dims);
dims[axis] = d_indices[dims[axis]];
int outIndex = outShape.index(dims);
out[outIndex] = in[index];
}
}
}
void Select(Tensor out,
const Tensor in,
int axis,
const std::vector<size_t>& indices,
Ptr<Allocator> allocator) {
cudaSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
auto mp_indices = allocator->alloc<size_t>(indices.size());
CudaCopy(indices.data(),
indices.data() + indices.size(),
mp_indices->data<size_t>());
int axisGPU = axis + functional::Shape::size() - out->shape().size();
gSelect<<<blocks, threads>>>(out->data(),
out->shape(),
in->data(),
in->shape(),
axisGPU,
mp_indices->data<size_t>());
allocator->free(mp_indices);
}
void Insert(Tensor out,
const Tensor in,
int axis,
const std::vector<size_t>& indices,
Ptr<Allocator> allocator) {
cudaSetDevice(in->getDevice().no);
int length = in->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
auto mp_indices = allocator->alloc<size_t>(indices.size());
CudaCopy(indices.data(),
indices.data() + indices.size(),
mp_indices->data<size_t>());
int axisGPU = axis + functional::Shape::size() - out->shape().size();
gInsert<<<blocks, threads>>>(out->data(),
out->shape(),
in->data(),
in->shape(),
axisGPU,
mp_indices->data<size_t>());
allocator->free(mp_indices);
}
__global__ void gGRUFastForward(float* out,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols,
bool final) {
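// GRU cell, computed per row (one row = one batch entry). xW and sU hold the
// three pre-activations per row, laid out as [r | z | h], each `cols` wide:
//   r = sigmoid(xW_r + sU_r + b_r)      (reset gate,  offset 0)
//   z = sigmoid(xW_z + sU_z + b_z)      (update gate, offset cols)
//   h = tanh(xW_h + r * sU_h + b_h)     (candidate,   offset 2*cols;
//                                        `final` moves b_h inside the r product)
//   out = (1 - z) * h + z * state, masked back to state where mask == 0.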
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowState = state + j * cols;
const float* xWrow = xW + j * cols * 3;
const float* sUrow = sU + j * cols * 3;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float r = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float z = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float h;
if(final)
h = tanhf(xWrow[l] + (sUrow[l] + b[l]) * r);
else
h = tanhf(xWrow[l] + sUrow[l] * r + b[l]);
float out = (1.0f - z) * h + z * rowState[i];
rowOut[i] = m * out + (1 - m) * rowState[i];
}
}
}
}
}
void GRUFastForward(Tensor out, std::vector<Tensor> inputs, bool final) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gGRUFastForward<<<blocks, threads>>>(
out->data(), // output
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
rows,
cols,
final);
}
__global__ void gGRUFastBackward(float* outState,
float* outXW,
float* outSU,
float* outB,
const float* state,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols,
bool final) {
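// Backward pass of the GRU cell above: r, z and h are recomputed from the
// saved inputs, then the chain rule is applied. The factor
// t = (1 - z) * (1 - h*h) below is d(out)/d(pre-tanh candidate). Bias
// gradients are accumulated with atomicAdd because all rows share one bias.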
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOutState = outState + j * cols;
float* rowOutXW = outXW + j * cols * 3;
float* rowOutSU = outSU + j * cols * 3;
const float* rowState = state + j * cols;
const float* rowXW = xW + j * cols * 3;
const float* rowSU = sU + j * cols * 3;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + cols;
int l = i + 2 * cols;
float r = stableLogit(rowXW[i] + rowSU[i] + b[i]);
float z = stableLogit(rowXW[k] + rowSU[k] + b[k]);
float h;
if(final)
h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r);
else
h = tanhf(rowXW[l] + rowSU[l] * r + b[l]);
float adj = rowAdj[i];
float t = (1 - z) * (1 - h * h);
// df/ds
if(outState)
rowOutState[i] += (m * z - m + 1) * adj;
// df/d(xW_r) ...
float dfdxW_r = m * r * (1 - r) * t * adj;
if(final)
dfdxW_r *= rowSU[l] + b[l];
else
dfdxW_r *= rowSU[l];
if(outXW)
rowOutXW[i] += dfdxW_r;
if(outSU)
rowOutSU[i] += dfdxW_r;
if(outB)
atomicAdd(outB + i, dfdxW_r);
// df/d(xW_z) ...
float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj;
if(outXW)
rowOutXW[k] += dfdxW_z;
if(outSU)
rowOutSU[k] += dfdxW_z;
if(outB)
atomicAdd(outB + k, dfdxW_z);
// df/d(xW_x) ...
float dfdxW_x = m * t * adj;
if(outXW)
rowOutXW[l] += dfdxW_x;
if(outSU)
rowOutSU[l] += dfdxW_x * r;
if(outB)
if(final)
atomicAdd(outB + l, dfdxW_x * r);
else
atomicAdd(outB + l, dfdxW_x);
}
}
}
}
}
void GRUFastBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj,
bool final) {
cudaSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gGRUFastBackward<<<blocks, threads>>>(
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
adj->data(),
rows,
cols,
final);
}
__global__ void gCrossEntropyPick(float* out,
const functional::Shape outShape,
const float* in,
const functional::Shape inShape,
const float* pick) {
int rows = inShape.elements() / inShape.back();
int cols = inShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
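// Shared-memory tree reduction over the per-thread partial maxima: each pass
// folds the upper half onto the lower half, with skip = ceil(len/2) so odd
// lengths work (the middle element is carried over). E.g. len 5 folds pairs
// (0,3),(1,4) keeping 2 -> len 3 folds (0,2) keeping 1 -> len 2 -> len 1.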
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += __expf(sp[id] - max);
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id == (int)pick[j]) {
out[j] = __logf(_sum[0]) - sp[id] + max;
}
}
}
}
}
void CrossEntropyPick(Tensor out, Tensor in, Tensor pick) {
cudaSetDevice(out->getDevice().no);
int rows = in->shape().elements() / in->shape().back();
int cols = in->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)rows);
int threads = std::min(MAX_THREADS, (int)cols);
int shared = sizeof(float) * threads * 2;
gCrossEntropyPick<<<blocks, threads, shared>>>(
out->data(), out->shape(), in->data(), in->shape(), pick->data());
}
__global__ void gCrossEntropyPickBackward(float* out,
const functional::Shape outShape,
const float* adj,
const float* in,
const float* pick) {
int rows = outShape.elements() / outShape.back();
int cols = outShape.back();
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* sp = in + j * cols;
float* so = out + j * cols;
extern __shared__ float _share[];
float* _max = _share + blockDim.x;
_max[threadIdx.x] = sp[threadIdx.x];
for(int tid = 1; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
if(sp[id] > _max[threadIdx.x])
_max[threadIdx.x] = sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
if(_max[threadIdx.x + skip] > _max[threadIdx.x]) {
_max[threadIdx.x] = _max[threadIdx.x + skip];
}
}
len = (len + 1) >> 1;
}
__syncthreads();
float max = _max[0];
__syncthreads();
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = __expf(sp[id] - max);
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// cross-entropy
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float sub = (float)(id == (int)pick[j]);
so[id] += adj[j] * (__expf(sp[id] - max) / _sum[0] - sub);
}
}
}
}
}
void CrossEntropyPickBackward(Tensor out, Tensor adj, Tensor a, Tensor pick) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)rows);
int threads = std::min(MAX_THREADS, (int)cols);
int shared = sizeof(float) * threads * 2;
gCrossEntropyPickBackward<<<blocks, threads, shared>>>(
out->data(), out->shape(), adj->data(), a->data(), pick->data());
}
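// L2Norm computes sqrt(sum_i x_i^2): ReduceAll with the functional expression
// _1 * _1 squares and reduces the elements into a small temporary tensor, and
// the final square root is taken on the host.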
float L2Norm(Tensor in) {
cudaSetDevice(in->getDevice().no);
int size = in->shape().elements();
int threads = std::min(MAX_THREADS, size);
int blocks = std::min(MAX_BLOCKS, size / threads + (size % threads != 0));
uint8_t* data;
cudaMalloc(&data, blocks * sizeof(float));
Tensor out(new TensorBase(New<MemoryPiece>(data, blocks * sizeof(float)),
{1, blocks},
in->getBackend()));
using namespace functional;
ReduceAll(_1 * _1, out, in);
float dataCpu = sqrtf(out->get(0));
out.reset();
cudaFree(data);
return dataCpu;
}
__global__ void gAtt(float* out,
const float* va,
const float* ctx,
const float* state,
int m, // total rows (batch x time x beam)
int k, // depth
int b, // batch size
int t // time of ctx
) {
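// Additive (Bahdanau-style) attention energies, one block per output row:
//   out[j] = sum_d va[d] * tanh(ctx[j,d] + state[j,d])
// where ctx is indexed over batch*time and state is broadcast over time.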
int rows = m;
int cols = k;
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
const float* vaRow = va;
const float* ctxRow = ctx + (j % (b * t)) * cols;
const float* stateRow = state + ((j / (b * t)) * b + j % b) * cols;
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = ctxRow[id] + stateRow[id];
float ex = tanhf(z) * vaRow[id];
_sum[threadIdx.x] += ex;
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
out[j] = _sum[0];
__syncthreads();
}
}
}
void Att(Tensor out, Tensor va, Tensor context, Tensor state) {
cudaSetDevice(out->getDevice().no);
size_t m = out->shape().elements() / out->shape().back();
size_t k = context->shape()[-1];
size_t b = context->shape()[-2];
size_t t = context->shape()[-3];
int blocks = std::min(MAX_BLOCKS, (int)m);
int threads = std::min(MAX_THREADS, (int)k);
int shared = sizeof(float) * threads * 2;
gAtt<<<blocks, threads, shared>>>(
out->data(), va->data(), context->data(), state->data(), m, k, b, t);
}
__global__ void gAttBack(float* gVa,
float* gContext,
float* gState,
const float* va,
const float* context,
const float* state,
const float* adj,
int m, // rows
int k, // cols
int n // batch size
) {
int rows = m;
int cols = k;
for(int bid = 0; bid < m; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* gcRow = gContext + j * cols;
float* gsRow = gState + (j % n) * cols;
const float* cRow = context + j * cols;
const float* sRow = state + (j % n) * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float z = cRow[id] + sRow[id];
float t = tanhf(z);
float r = va[id] * (1.f - t * t);
gcRow[id] += r * adj[j];
gsRow[id] += r * adj[j];
atomicAdd(gVa + id, t * adj[j]);
}
}
}
}
}
void AttBack(Tensor gVa,
Tensor gContext,
Tensor gState,
Tensor va,
Tensor context,
Tensor state,
Tensor adj) {
cudaSetDevice(adj->getDevice().no);
size_t m = adj->shape().elements() / adj->shape()[-1];
size_t k = context->shape()[-1];
size_t n = context->shape()[-2];
int blocks = std::min(MAX_BLOCKS, (int)n);
int threads = std::min(MAX_THREADS, (int)k);
gAttBack<<<blocks, threads>>>(gVa->data(),
gContext->data(),
gState->data(),
va->data(),
context->data(),
state->data(),
adj->data(),
m,
k,
n);
}
__global__ void gLNormalization(float* out,
const float* in,
const float* alpha,
const float* beta,
int rows,
int cols,
float eps = 1e-9) {
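// Row-wise layer normalization:
//   y = alpha * (x - mean(x)) / sqrt(var(x) + eps) + beta
// computed with two shared-memory reductions per row (mean, then variance).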
extern __shared__ float _share[];
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* so = out + j * cols;
const float* sp = in + j * cols;
float* _sum = _share + blockDim.x;
_sum[threadIdx.x] = 0.0f;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += sp[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = _sum[0] / cols;
__syncthreads();
float* _sqSum = _share + blockDim.x;
_sqSum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = sp[id] - mean;
_sqSum[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sqSum[threadIdx.x] += _sqSum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (_sqSum[0] / cols));
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float t = alpha[id] * ((sp[id] - mean) / sigma);
if(beta != nullptr)
t += beta[id];
so[id] = t;
}
}
}
}
}
void LayerNormalization(Tensor out,
Tensor in,
Tensor gamma,
Tensor beta,
float eps) {
cudaSetDevice(out->getDevice().no);
int rows = in->shape().elements() / in->shape().back();
int cols = in->shape().back();
int blocks = std::min(MAX_BLOCKS, (int)rows);
int threads = std::min(MAX_THREADS, (int)cols);
int shared = 2 * threads * sizeof(float);
gLNormalization<<<blocks, threads, shared>>>(out->data(),
in->data(),
gamma->data(),
beta ? beta->data() : nullptr,
rows,
cols,
eps);
}
__global__ void gLayerNormalizationGrad(float* gradX,
float* gradGamma,
float* gradBeta,
float* adj,
float* y,
float* x,
float* gamma,
float* beta,
int rows,
int cols,
float eps = 1e-9) {
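// Layer-norm backward. With x_hat = (x - mean) / sigma recovered from y, the
// per-element input gradient is
//   dx = gamma * (N * dy - sum(dy) - x_hat * sum(dy * x_hat)) / (N * sigma)
// clipped to [-1000, 1000]; gamma/beta gradients are reduced over rows via
// atomicAdd.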
extern __shared__ float shared[];
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* sum_adj = shared;
float* sum_adj_x = shared + blockDim.x;
float* sum_x = shared + 2 * blockDim.x;
float* sum_sqr = shared + 3 * blockDim.x;
const float* xRow = x + j * cols;
const float* yRow = y + j * cols;
const float* adjRow = adj + j * cols;
float* gradXRow = gradX + j * cols;
sum_x[threadIdx.x] = 0.0f;
sum_adj[threadIdx.x] = 0.0f;
sum_adj_x[threadIdx.x] = 0.0f;
sum_sqr[threadIdx.x] = 0.0f;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
sum_x[threadIdx.x] += xRow[id];
sum_adj_x[threadIdx.x]
+= adjRow[id] * (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
sum_adj[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1)) {
sum_x[threadIdx.x] += sum_x[threadIdx.x + skip];
sum_adj[threadIdx.x] += sum_adj[threadIdx.x + skip];
sum_adj_x[threadIdx.x] += sum_adj_x[threadIdx.x + skip];
}
len = (len + 1) >> 1;
}
__syncthreads();
float mean = sum_x[0] / cols;
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float ex = xRow[id] - mean;
sum_sqr[threadIdx.x] += ex * ex;
}
}
__syncthreads();
len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
sum_sqr[threadIdx.x] += sum_sqr[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
float sigma = sqrtf(eps + (sum_sqr[0] / cols));
__syncthreads();
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
float grad_x = 0.0f;
float x_hat = (yRow[id] - ((beta) ? beta[id] : 0)) / gamma[id];
grad_x += cols * adjRow[id];
grad_x -= sum_adj[0];
grad_x -= sum_adj_x[0] * x_hat;
grad_x /= (cols * sigma);
float valX = gamma[id] * grad_x;
float sign = (0.f < valX) - (valX < 0.f);
valX = fabs(valX) > 1000 ? sign * 1000 : valX;
gradXRow[id] += valX;
atomicAdd(gradGamma + id, adjRow[id] * x_hat);
if(beta) {
atomicAdd(gradBeta + id, adjRow[id]);
}
}
}
}
}
}
void LayerNormalizationGrad(Tensor gradX,
Tensor gradGamma,
Tensor gradBeta,
Tensor adj,
Tensor y,
Tensor x,
Tensor gamma,
Tensor beta,
float eps) {
cudaSetDevice(adj->getDevice().no);
int rows = y->shape().elements() / y->shape()[-1];
int cols = y->shape()[-1];
int threads = std::min(MAX_THREADS, cols);
int blocks = std::min(MAX_BLOCKS, rows);
int shared = sizeof(float) * threads * 4;
gLayerNormalizationGrad<<<blocks, threads, shared>>>(
gradX->data(),
gradGamma->data(),
(gradBeta) ? gradBeta->data() : nullptr,
adj->data(),
y->data(),
x->data(),
gamma->data(),
(beta) ? beta->data() : nullptr,
rows,
cols,
eps);
}
__global__ void gShift(float* out, const float* in, int length, int offset) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
if(index - offset < 0 || index - offset >= length)
out[index] = 0;
else
out[index] = in[index - offset];
}
}
}
void Shift(Tensor out, Tensor in, marian::Shape shift, bool invert) {
ABORT_IF(in->shape().size() != shift.size(), "bad dimensions");
// BUGBUG: This can only shift along the first axis. Shifting, e.g., along the last axis cannot be implemented this way.
int offset = 0;
for(int i = 0; i < shift.size(); ++i)
offset += in->shape().stride(i) * shift[i];
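// The flat offset is the dot product of the shift vector with the row-major
// strides. E.g. shifting a {time, batch, dim} tensor by {1, 0, 0} gives
// offset = batch * dim, i.e. every element moves one time step forward.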
if(invert)
offset = -offset;
cudaSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gShift<<<blocks, threads>>>(out->data(), in->data(), length, offset);
}
__global__ void gSetSparse(float* out,
const size_t* indices,
const float* values,
int length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
out[indices[index]] = values[index];
}
}
}
void SetSparse(float* out,
const std::vector<size_t>& indices,
const std::vector<float>& values) {
int length = indices.size();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
size_t* d_indices;
CUDA_CHECK(cudaMalloc(&d_indices, length * sizeof(size_t)));
CUDA_CHECK(cudaMemcpy(d_indices,
indices.data(),
length * sizeof(size_t),
cudaMemcpyHostToDevice));
float* d_values;
CUDA_CHECK(cudaMalloc(&d_values, length * sizeof(float)));
CUDA_CHECK(cudaMemcpy(
d_values, values.data(), length * sizeof(float), cudaMemcpyHostToDevice));
gSetSparse<<<blocks, threads>>>(out, d_indices, d_values, length);
cudaFree(d_indices);
cudaFree(d_values);
}
/******************************************************************************/
__global__ void gLSTMCellForward(float* out,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* mask,
size_t rows,
size_t cols) {
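// LSTM cell update. xW and sU hold the four pre-activations per row, laid
// out as [f | i | c | o], each `cols` wide:
//   gf = sigmoid(.), gi = sigmoid(.), gc = tanh(.)
//   cell_out = gf * cell + gi * gc, masked back to the old cell where mask == 0.
// (The output gate `o` is applied separately in gLSTMOutputForward.)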
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOut = out + j * cols;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
float cout = gf * rowCell[i] + gi * gc;
rowOut[i] = m * cout + (1 - m) * rowCell[i];
}
}
}
}
}
void LSTMCellForward(Tensor out, std::vector<Tensor> inputs) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gLSTMCellForward<<<blocks, threads>>>(
out->data(), // output
inputs[0]->data(), // cell state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
rows,
cols);
}
__global__ void gLSTMOutputForward(float* out,
const float* cell,
const float* xW,
const float* sU,
const float* b,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOut = out + j * cols;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
rowOut[i] = go * tanhf(rowCell[i]);
}
}
}
}
}
void LSTMOutputForward(Tensor out, std::vector<Tensor> inputs) {
cudaSetDevice(out->getDevice().no);
int rows = out->shape().elements() / out->shape().back();
int cols = out->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gLSTMOutputForward<<<blocks, threads>>>(out->data(), // output
inputs[0]->data(), // cell state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
rows,
cols);
}
__global__ void gLSTMCellBackward(float* outCell,
float* outXW,
float* outSU,
float* outB,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* mask,
const float* adj,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float m = !mask || mask[j];
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
float gf = stableLogit(xWrow[i] + sUrow[i] + b[i]);
int k = i + cols;
float gi = stableLogit(xWrow[k] + sUrow[k] + b[k]);
int l = i + 2 * cols;
float gc = tanhf(xWrow[l] + sUrow[l] + b[l]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += (m * gf - m + 1) * adj;
// dc/d(b_f) = dc/d(xW_f) ...
float dcdxf = m * rowCell[i] * gf * (1 - gf) * adj;
if(outXW)
rowOutXW[i] += dcdxf;
if(outSU)
rowOutSU[i] += dcdxf;
if(outB)
atomicAdd(outB + i, dcdxf);
// dc/d(b_i) ...
float dcdb_i = m * gc * gi * (1 - gi) * adj;
if(outXW)
rowOutXW[k] += dcdb_i;
if(outSU)
rowOutSU[k] += dcdb_i;
if(outB)
atomicAdd(outB + k, dcdb_i);
// dc/d(b_c) ...
float dcdxc = m * gi * (1 - gc * gc) * adj;
if(outXW)
rowOutXW[l] += dcdxc;
if(outSU)
rowOutSU[l] += dcdxc;
if(outB)
atomicAdd(outB + l, dcdxc);
}
}
}
}
}
void LSTMCellBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj) {
cudaSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gLSTMCellBackward<<<blocks, threads>>>(
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
inputs.size() > 4 ? inputs[4]->data() : 0, // mask
adj->data(),
rows,
cols);
}
__global__ void gLSTMOutputBackward(float* outCell,
float* outXW,
float* outSU,
float* outB,
const float* cell,
const float* xW,
const float* sU,
const float* b,
const float* adj,
size_t rows,
size_t cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
float* rowOutCell = outCell + j * cols;
float* rowOutXW = outXW + j * cols * 4;
float* rowOutSU = outSU + j * cols * 4;
const float* rowCell = cell + j * cols;
const float* xWrow = xW + j * cols * 4;
const float* sUrow = sU + j * cols * 4;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
int k = i + 3 * cols;
float go = stableLogit(xWrow[k] + sUrow[k] + b[k]);
float t = tanhf(rowCell[i]);
float adj = rowAdj[i];
// dc/dc_{t-1}
if(outCell)
rowOutCell[i] += go * (1 - t * t) * adj;
// dc/d(b_o) = dc/d(xW_o) ...
float dcdxo = t * go * (1 - go) * adj;
if(outXW)
rowOutXW[k] += dcdxo;
if(outSU)
rowOutSU[k] += dcdxo;
if(outB)
atomicAdd(outB + k, dcdxo);
}
}
}
}
}
void LSTMOutputBackward(std::vector<Tensor> outputs,
std::vector<Tensor> inputs,
Tensor adj) {
cudaSetDevice(adj->getDevice().no);
int rows = adj->shape().elements() / adj->shape().back();
int cols = adj->shape().back();
int blocks = std::min(MAX_BLOCKS, rows);
int threads = std::min(MAX_THREADS, cols);
gLSTMOutputBackward<<<blocks, threads>>>(
outputs[0] ? outputs[0]->data() : 0, // state - adj
outputs[1] ? outputs[1]->data() : 0, // xW - adj
outputs[2] ? outputs[2]->data() : 0, // sU - adj
outputs[3] ? outputs[3]->data() : 0, // b - adj
inputs[0]->data(), // state
inputs[1]->data(), // xW
inputs[2]->data(), // sU
inputs[3]->data(), // b
adj->data(),
rows,
cols);
}
__global__ void gHighwayForward(float* out,
const float* in1,
const float* in2,
const float* t,
size_t length) {
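// Highway layer: out = sigma(t) * in1 + (1 - sigma(t)) * in2, where sigma(t)
// is the transform gate, computed elementwise over the flattened tensors.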
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
float sigma = stableLogit(t[index]);
out[index] = in1[index] * sigma + in2[index] * (1.f - sigma);
}
}
}
void HighwayForward(Tensor out,
const Tensor in1,
const Tensor in2,
const Tensor t) {
cudaSetDevice(out->getDevice().no);
int length = out->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gHighwayForward<<<blocks, threads>>>(
out->data(), in1->data(), in2->data(), t->data(), length);
}
__global__ void gHighwayBackward(float* out1,
float* out2,
float* outt,
const float* in1,
const float* in2,
const float* t,
const float* adj,
size_t length) {
for(int bid = 0; bid < length; bid += blockDim.x * gridDim.x) {
int index = bid + blockDim.x * blockIdx.x + threadIdx.x;
if(index < length) {
float sigma = stableLogit(t[index]);
out1[index] = sigma * adj[index];
out2[index] = (1.f - sigma) * adj[index];
outt[index]
= sigma * (1.f - sigma) * (in1[index] - in2[index]) * adj[index];
}
}
}
void HighwayBackward(Tensor out1,
Tensor out2,
Tensor outt,
const Tensor in1,
const Tensor in2,
const Tensor t,
const Tensor adj) {
cudaSetDevice(out1->getDevice().no);
int length = out1->shape().elements();
int threads = std::min(MAX_THREADS, length);
int blocks = std::min(MAX_BLOCKS, length / threads + (length % threads != 0));
gHighwayBackward<<<blocks, threads>>>(out1->data(),
out2->data(),
outt->data(),
in1->data(),
in2->data(),
t->data(),
adj->data(),
length);
}
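// Masked max pooling: non-overlapping width-wide windows slide along the
// last input dimension; each output value is the maximum of mask-weighted
// inputs in its window, with the final window truncated to lastWidth (which
// the host wrappers compute below).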
__global__ void gMaxPoolingForward(float* out,
int outRows,
int outCols,
float* in,
int inRows,
int inCols,
float* mask,
int numKernels,
int maskCols,
int width,
int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= outRows * outCols)
return;
int rowId = tid / outRows;
int colId = tid % outRows;
float* b = in + (rowId * inCols) + (colId * width);
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
if(colId == outRows - 1) {
width = lastWidth;
}
float currentMax = b[0] * localMask[0];
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > currentMax) {
currentMax = b[i] * localMask[i];
}
}
out[rowId + (colId * outCols)] = currentMax;
}
void PoolingWithMaskingForward(Tensor out,
Tensor in,
Tensor mask,
int width,
bool isEven) {
int n = out->shape().elements();
int threads = std::min(n, MAX_THREADS);
int blocks = n / threads + (n % threads != 0);
auto& inShape = in->shape();
int inRows = inShape[0] * inShape[1];
int inCols = inShape[2];
auto& outShape = out->shape();
int outRows = outShape[2];
int outCols = outShape[0] * outShape[1];
int lastWidth
= ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
gMaxPoolingForward<<<blocks, threads>>>(out->data(),
outRows,
outCols,
in->data(),
inRows,
inCols,
mask->data(),
outShape[1],
mask->shape()[2],
width,
lastWidth);
}
__global__ void gMaxPoolingBackward(float* adj,
int adjRows,
int adjCols,
float* in,
float* adjIn,
int inRows,
int inCols,
float* mask,
int numKernels,
int maskCols,
int width,
int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= adjRows * adjCols)
return;
int rowId = tid / adjRows;
int colId = tid % adjRows;
float* b = in + (rowId * inCols) + (colId * width);
if(colId == adjRows - 1) {
width = lastWidth;
}
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
size_t currentMaxIdx = 0;
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) {
currentMaxIdx = i;
}
}
adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx]
+= adj[rowId + (colId * adjCols)];
}
void PoolingWithMaskingBackward(Tensor adj,
Tensor adjIn,
Tensor in,
Tensor mask,
int width,
bool isEven) {
int n = adj->shape().elements();
int threads = std::min(n, 512);
int blocks = n / threads + (n % threads != 0);
auto& inShape = in->shape();
int inRows = inShape[0] * inShape[1];
int inCols = inShape[2];
auto& adjShape = adj->shape();
int adjRows = adjShape[2];
int adjCols = adjShape[0] * adjShape[1];
int lastWidth
= ((inCols - isEven) % width == 0) ? width : (inCols - isEven) % width;
gMaxPoolingBackward<<<blocks, threads>>>(adj->data(),
adjRows,
adjCols,
in->data(),
adjIn->data(),
inRows,
inCols,
mask->data(),
adjShape[1],
mask->shape()[2],
width,
lastWidth);
}
}
} // namespace marian
|
7944215aefa317d70735478cf97962fd157a0b1d.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated d Wed Nov 14 22:53:52 2012
*/
#include "common_magma.h"
#define PRECISION_d
#include "commonblas.h"
//
// m, n - dimensions in the source (input) matrix.
// This routine copies the ha matrix from the CPU
// to dat on the GPU. In addition, the output matrix
// is transposed. The routine uses a buffer of size
// 2*lddb*nb pointed to by dB (lddb > m) on the GPU.
// Note that lda >= m and lddat >= n.
//
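//
// The copy/transpose pipeline below is double-buffered: while panel j is
// being transposed out of one half of dB on stream j%2, panel j+1 is already
// being uploaded into the other half on the opposite stream, so host-to-device
// transfers overlap with the transpose kernels.
//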
extern "C" void
magmablas_dsetmatrix_transpose( magma_int_t m, magma_int_t n,
const double *ha, magma_int_t lda,
double *dat, magma_int_t ldda,
double *dB, magma_int_t lddb, magma_int_t nb )
{
magma_int_t i = 0, j = 0, ib;
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
if (lda < m || ldda < n || lddb < m){
printf("Wrong arguments in zhtodt.\n");
return;
}
hipStream_t stream[2];
magma_queue_create( &stream[0] );
magma_queue_create( &stream[1] );
/* Move data from CPU to GPU in the first panel in the dB buffer */
ib = min(n-i, nb);
magma_dsetmatrix_async( m, ib,
ha + i*lda, lda,
dB + (j%2) * nb * lddb, lddb, stream[j%2] );
j++;
for(i=nb; i<n; i+=nb){
/* Move data from CPU to GPU in the second panel in the dB buffer */
ib = min(n-i, nb);
magma_dsetmatrix_async( m, ib,
ha+i*lda, lda,
dB + (j%2) * nb * lddb, lddb, stream[j%2] );
j++;
/* Note that the previous panel (i.e., j%2) comes through the stream
for the kernel so there is no need to synchronize. */
// magmablas_dtranspose2( dat+i-nb, ldda, dB + (j%2)*nb*lddb, lddb, m, nb);
magmablas_dtranspose2s( dat+i-nb, ldda, dB + (j%2)*nb*lddb, lddb, m, nb, &stream[j%2]);
}
/* Transpose the last part of the matrix. */
j++;
// magmablas_dtranspose2( dat+i-nb, ldda, dB + (j%2)*nb*lddb, lddb, m, ib);
magmablas_dtranspose2s( dat+i-nb, ldda, dB + (j%2)*nb*lddb, lddb, m, ib, &stream[j%2]);
magma_queue_destroy( stream[0] );
magma_queue_destroy( stream[1] );
}
//===========================================================================
// This version is similar to the above but for multiGPUs. The distribution
// is 1D block cyclic. The input arrays are pointers for the corresponding
// GPUs. The streams are passed as argument, in contrast to the single GPU
// routine.
// NOTE: see magmablas_dsetmatrix_transpose_mgpu.
//===========================================================================
extern "C" void
magmablas_dsetmatrix_transpose2( magma_int_t m, magma_int_t n,
const double *ha, magma_int_t lda,
double **dat, magma_int_t *ldda,
double **dB, magma_int_t lddb, magma_int_t nb,
magma_int_t num_gpus, hipStream_t stream[][2] )
{
magma_int_t i = 0, j[4] = {0, 0, 0, 0}, ib, k = 0;
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
if (lda < m || lddb < m){
printf("Wrong arguments in zhtodt2.\n");
return;
}
if (n<num_gpus*nb){
for(i=0; i<n; i+=nb){
k = (i/nb)%num_gpus;
magma_setdevice(k);
ib = min(n-i, nb);
magma_dsetmatrix_async( m, ib,
ha+i*lda, lda,
dB[k], lddb, stream[k][0] );
}
for(i=0; i<n; i+=nb){
k = (i/nb)%num_gpus;
magma_setdevice(k);
ib = min(n-i, nb);
//magma_queue_sync( stream[k][0] );
//magmablas_dtranspose2( dat[k]+ i/(nb*num_gpus)*nb, ldda[k],
// dB[k], lddb, m, ib);
magmablas_dtranspose2s( dat[k]+ i/(nb*num_gpus)*nb, ldda[k],
dB[k], lddb, m, ib, &stream[k][0]);
}
}
else
{
for(i=0; i<(n + num_gpus*nb); i+=nb){
k = (i/nb)%num_gpus;
magma_setdevice(k);
if (i<n){
/* Move data from CPU to GPU in the second panel in the dB buffer */
ib = min(n-i, nb);
magma_dsetmatrix_async( m, ib,
ha+i*lda, lda,
dB[k] + (j[k]%2)*nb*lddb, lddb, stream[k][j[k]%2] );
}
j[k]++;
if (i> (num_gpus-1)*nb){
/* Make sure that the previous panel (i.e., j[k]%2) has arrived
and transpose it directly into the dat matrix */
//magma_queue_sync( stream[k][ j[k]%2 ] );
ib = min(n - i + num_gpus*nb, nb);
//magmablas_dtranspose2( dat[k]+ i/(nb*num_gpus)*nb -nb, ldda[k],
// dB[k] +(j[k]%2)*nb*lddb, lddb, m, ib);
magmablas_dtranspose2s( dat[k]+ i/(nb*num_gpus)*nb -nb, ldda[k],
dB[k] +(j[k]%2)*nb*lddb, lddb, m, ib, &stream[k][j[k]%2]);
}
}
}
}
| 7944215aefa317d70735478cf97962fd157a0b1d.cu | /*
-- MAGMA (version 1.3.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
November 2012
@generated d Wed Nov 14 22:53:52 2012
*/
#include "common_magma.h"
#define PRECISION_d
#include "commonblas.h"
//
// m, n - dimensions in the source (input) matrix.
// This routine copies the ha matrix from the CPU
// to dat on the GPU. In addition, the output matrix
// is transposed. The routine uses a buffer of size
// 2*lddb*nb pointed to by dB (lddb > m) on the GPU.
// Note that lda >= m and lddat >= n.
//
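//
// The copy/transpose pipeline below is double-buffered: while panel j is
// being transposed out of one half of dB on stream j%2, panel j+1 is already
// being uploaded into the other half on the opposite stream, so host-to-device
// transfers overlap with the transpose kernels.
//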
extern "C" void
magmablas_dsetmatrix_transpose( magma_int_t m, magma_int_t n,
const double *ha, magma_int_t lda,
double *dat, magma_int_t ldda,
double *dB, magma_int_t lddb, magma_int_t nb )
{
magma_int_t i = 0, j = 0, ib;
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
if (lda < m || ldda < n || lddb < m){
printf("Wrong arguments in zhtodt.\n");
return;
}
cudaStream_t stream[2];
magma_queue_create( &stream[0] );
magma_queue_create( &stream[1] );
/* Move data from CPU to GPU in the first panel in the dB buffer */
ib = min(n-i, nb);
magma_dsetmatrix_async( m, ib,
ha + i*lda, lda,
dB + (j%2) * nb * lddb, lddb, stream[j%2] );
j++;
for(i=nb; i<n; i+=nb){
/* Move data from CPU to GPU in the second panel in the dB buffer */
ib = min(n-i, nb);
magma_dsetmatrix_async( m, ib,
ha+i*lda, lda,
dB + (j%2) * nb * lddb, lddb, stream[j%2] );
j++;
/* Note that the previous panel (i.e., j%2) comes through the stream
for the kernel so there is no need to synchronize. */
// magmablas_dtranspose2( dat+i-nb, ldda, dB + (j%2)*nb*lddb, lddb, m, nb);
magmablas_dtranspose2s( dat+i-nb, ldda, dB + (j%2)*nb*lddb, lddb, m, nb, &stream[j%2]);
}
/* Transpose the last part of the matrix. */
j++;
// magmablas_dtranspose2( dat+i-nb, ldda, dB + (j%2)*nb*lddb, lddb, m, ib);
magmablas_dtranspose2s( dat+i-nb, ldda, dB + (j%2)*nb*lddb, lddb, m, ib, &stream[j%2]);
magma_queue_destroy( stream[0] );
magma_queue_destroy( stream[1] );
}
//===========================================================================
// This version is similar to the above but for multiGPUs. The distribution
// is 1D block cyclic. The input arrays are pointers for the corresponding
// GPUs. The streams are passed as argument, in contrast to the single GPU
// routine.
// NOTE: see magmablas_dsetmatrix_transpose_mgpu.
//===========================================================================
extern "C" void
magmablas_dsetmatrix_transpose2( magma_int_t m, magma_int_t n,
const double *ha, magma_int_t lda,
double **dat, magma_int_t *ldda,
double **dB, magma_int_t lddb, magma_int_t nb,
magma_int_t num_gpus, cudaStream_t stream[][2] )
{
magma_int_t i = 0, j[4] = {0, 0, 0, 0}, ib, k = 0;
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
if (lda < m || lddb < m){
printf("Wrong arguments in zhtodt2.\n");
return;
}
if (n<num_gpus*nb){
for(i=0; i<n; i+=nb){
k = (i/nb)%num_gpus;
magma_setdevice(k);
ib = min(n-i, nb);
magma_dsetmatrix_async( m, ib,
ha+i*lda, lda,
dB[k], lddb, stream[k][0] );
}
for(i=0; i<n; i+=nb){
k = (i/nb)%num_gpus;
magma_setdevice(k);
ib = min(n-i, nb);
//magma_queue_sync( stream[k][0] );
//magmablas_dtranspose2( dat[k]+ i/(nb*num_gpus)*nb, ldda[k],
// dB[k], lddb, m, ib);
magmablas_dtranspose2s( dat[k]+ i/(nb*num_gpus)*nb, ldda[k],
dB[k], lddb, m, ib, &stream[k][0]);
}
}
else
{
for(i=0; i<(n + num_gpus*nb); i+=nb){
k = (i/nb)%num_gpus;
magma_setdevice(k);
if (i<n){
/* Move data from CPU to GPU in the second panel in the dB buffer */
ib = min(n-i, nb);
magma_dsetmatrix_async( m, ib,
ha+i*lda, lda,
dB[k] + (j[k]%2)*nb*lddb, lddb, stream[k][j[k]%2] );
}
j[k]++;
if (i> (num_gpus-1)*nb){
/* Make sure that the previous panel (i.e., j[k]%2) has arrived
and transpose it directly into the dat matrix */
//magma_queue_sync( stream[k][ j[k]%2 ] );
ib = min(n - i + num_gpus*nb, nb);
//magmablas_dtranspose2( dat[k]+ i/(nb*num_gpus)*nb -nb, ldda[k],
// dB[k] +(j[k]%2)*nb*lddb, lddb, m, ib);
magmablas_dtranspose2s( dat[k]+ i/(nb*num_gpus)*nb -nb, ldda[k],
dB[k] +(j[k]%2)*nb*lddb, lddb, m, ib, &stream[k][j[k]%2]);
}
}
}
}
|
a1e3a04a362a0c04b4e66701eae80c11e5155a06.hip | // !!! This is a file automatically generated by hipify!!!
#define DIM 64
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <chrono>
#define TILE_DIM 32
__global__
void MatrixMulKernel(double *M, double *N, double *P, int Width) {
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if ((Row < Width) && (Col < Width)) {
double Pvalue = 0;
for (int k = 0; k < Width; k++) {
Pvalue += M[Row*Width + k] * N[k*Width + Col];
}
P[Row*Width + Col] = Pvalue;
}
}
// Fills the matrix with random double values
void populateMatrix(double *M) {
srand(time(NULL));
for (int i = 0; i < DIM; i++) {
for (int j = 0; j < DIM; j++) {
//M[i * DIM + j] = (double)(1.0);
M[i * DIM + j] = (double)((rand() % 10000) /(double)DIM);
}
}
}
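// Tiled matrix multiplication: each block computes a TILE_DIM x TILE_DIM tile
// of C by marching over the K dimension in TILE_DIM-wide steps, staging one
// tile of A and one tile of B in shared memory per step, so each global
// element is loaded once per tile instead of once per output element.
// Out-of-range loads are padded with zeros so non-multiple sizes also work.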
__global__ void MatMul(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) {
double CValue = 0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows)
As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols)
Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n)
CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
__syncthreads();
}
if (Row < CRows && Col < CCols)
C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) +
(blockIdx.x * blockDim.x)+ threadIdx.x] = CValue;
}
void LaunchKernel(double *M, double *N, double *P, int size) {
// Create the streams
int n_stream = 4;
hipStream_t stream[n_stream];
for(int i = 0; i < n_stream; i++){
hipStreamCreate(&stream[i]);
}
// Split N into two column halves (left half N_3, right half N_4)
double *N_3 = (double *)malloc(DIM * DIM/2 *(sizeof(double)));
double *N_4 = (double *)malloc(DIM * DIM/2 *(sizeof(double)));
for(int i = 0; i < DIM; i++){
for(int j = 0; j < DIM; j++){
if(j < DIM/2)
N_3[i*DIM/2 + j]=N[i*DIM+j];
else
N_4[i*DIM/2 + (j-DIM/2)] = N[i*DIM + j];
}
}
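// The product C = M * N is computed as four independent quadrants:
// quadrant (r, c) of C = (row half r of M) * (column half c of N), one
// quadrant per stream, so the four uploads and kernels can overlap across
// streams (full copy/compute overlap would additionally need pinned host
// memory instead of plain malloc).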
// Allocate the device memory segments
size_t slice = DIM * (DIM/2);
double *d_M0, *d_N0, *d_P0;
double *d_M1, *d_N1, *d_P1;
double *d_M2, *d_N2, *d_P2;
double *d_M3, *d_N3, *d_P3;
hipMalloc((void **)&d_M0, DIM * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_N0, DIM * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_P0, (DIM/2)* (DIM/2) * sizeof(double));
hipMalloc((void **)&d_M1, DIM * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_N1, DIM * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_P1, (DIM/2) * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_M2, DIM * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_N2, DIM * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_P2, (DIM/2) * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_M3, DIM * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_N3, DIM * (DIM/2) * sizeof(double));
hipMalloc((void **)&d_P3, (DIM/2) * (DIM/2) * sizeof(double));
// Declare blockDim and gridDim
dim3 block(TILE_DIM, TILE_DIM, 1);
dim3 grid(ceil((double)DIM / block.x), ceil(((double)DIM/2)/block.y) , 1);
// Launch the kernels
std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
hipMemcpyAsync(d_M0, M, slice * sizeof (double), hipMemcpyHostToDevice, stream[0]);
hipMemcpyAsync(d_N0, N_3, slice * sizeof (double), hipMemcpyHostToDevice, stream[0]);
double *ker0 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
hipMemcpyAsync(d_M1, M, slice * sizeof (double), hipMemcpyHostToDevice, stream[1]);
hipMemcpyAsync(d_N1, N_4, slice * sizeof (double), hipMemcpyHostToDevice, stream[1]);
double *ker1 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
hipMemcpyAsync(d_M2, M + slice, slice * sizeof (double), hipMemcpyHostToDevice, stream[2]);
hipMemcpyAsync(d_N2, N_3, slice * sizeof (double), hipMemcpyHostToDevice, stream[2]);
double *ker2 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
hipMemcpyAsync(d_M3, M + slice, slice * sizeof (double), hipMemcpyHostToDevice, stream[3]);
hipMemcpyAsync(d_N3, N_4, slice * sizeof (double), hipMemcpyHostToDevice, stream[3]);
double *ker3 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
hipLaunchKernelGGL(( MatMul), dim3(grid), dim3(block), block.x * block.y * sizeof(double), stream[0], d_M0, d_N0, d_P0, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
hipLaunchKernelGGL(( MatMul), dim3(grid), dim3(block), block.x * block.y * sizeof(double), stream[1], d_M1, d_N1, d_P1, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
hipLaunchKernelGGL(( MatMul), dim3(grid), dim3(block), block.x * block.y * sizeof(double), stream[2], d_M2, d_N2, d_P2, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
hipLaunchKernelGGL(( MatMul), dim3(grid), dim3(block), block.x * block.y * sizeof(double), stream[3], d_M3, d_N3, d_P3, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
hipDeviceSynchronize();
hipMemcpyAsync(ker0, d_P0, DIM/2 * DIM/2 * sizeof (double), hipMemcpyDeviceToHost, stream[0]);
hipMemcpyAsync(ker1, d_P1, DIM/2 * DIM/2 * sizeof (double), hipMemcpyDeviceToHost, stream[1]);
hipMemcpyAsync(ker2, d_P2, DIM/2 * DIM/2 * sizeof (double), hipMemcpyDeviceToHost, stream[2]);
hipMemcpyAsync(ker3, d_P3, DIM/2 * DIM/2 * sizeof (double), hipMemcpyDeviceToHost, stream[3]);
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
printf("%lf\n", tempo);
// Copy the submatrices into the final matrix
for(int i = 0; i < DIM; i++){
for(int j = 0; j<DIM ; j++){
if(i < DIM/2 && j < DIM/2)
P[i * DIM + j ] = ker0[i * DIM/2 + j];
else if(i < DIM/2 && j >= DIM/2)
P[i * DIM + j ] = ker1[i * DIM/2 + (j-DIM/2)];
else if(i >= DIM/2 && j < DIM/2)
P[i * DIM + j ] = ker2[(i-DIM/2) * DIM/2 + j];
else if(i >= DIM/2 && j >= DIM/2)
P[i * DIM + j ] = ker3[(i-DIM/2) * DIM/2 + (j-DIM/2)];
}
}
hipFree(d_M0);
hipFree(d_N0);
hipFree(d_P0);
hipFree(d_M1);
hipFree(d_N1);
hipFree(d_P1);
hipFree(d_M2);
hipFree(d_N2);
hipFree(d_P2);
hipFree(d_M3);
hipFree(d_N3);
hipFree(d_P3);
}
void MatrixMulHost(double *A, double *B, double *C) {
//std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
int c, d, k;
for (c = 0; c < DIM; c++) {
for (d = 0; d < DIM; d++) {
double Pvalue = 0;
for (k = 0; k < DIM; k++) {
Pvalue += A[c * DIM + k] * B[k * DIM + d];
}
C[c * DIM + d] = Pvalue;
}
}
//std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
//double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
//printf("TEMPO ELABORAZIONE SU HOST: %lf\n", tempo);
}
int main() {
double *A = (double *)malloc(DIM * DIM * sizeof(double));
double *B = (double *)malloc(DIM * DIM * sizeof(double));
double *C = (double *)malloc(DIM * DIM * sizeof(double));
double *C_H = (double *)malloc(DIM * DIM * sizeof(double));
populateMatrix(A);
populateMatrix(B);
std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
LaunchKernel(&A[0], &B[0], &C[0], DIM);
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
}
| a1e3a04a362a0c04b4e66701eae80c11e5155a06.cu | #define DIM 64
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <chrono>
#define TILE_DIM 32
__global__
void MatrixMulKernel(double *M, double *N, double *P, int Width) {
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
if ((Row < Width) && (Col < Width)) {
double Pvalue = 0;
for (int k = 0; k < Width; k++) {
Pvalue += M[Row*Width + k] * N[k*Width + Col];
}
P[Row*Width + Col] = Pvalue;
}
}
// Fills the matrix with random double values
void populateMatrix(double *M) {
srand(time(NULL));
for (int i = 0; i < DIM; i++) {
for (int j = 0; j < DIM; j++) {
//M[i * DIM + j] = (double)(1.0);
M[i * DIM + j] = (double)((rand() % 10000) /(double)DIM);
}
}
}
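// Tiled matrix multiplication: each block computes a TILE_DIM x TILE_DIM tile
// of C by marching over the K dimension in TILE_DIM-wide steps, staging one
// tile of A and one tile of B in shared memory per step, so each global
// element is loaded once per tile instead of once per output element.
// Out-of-range loads are padded with zeros so non-multiple sizes also work.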
__global__ void MatMul(double* A, double* B, double* C, int ARows, int ACols, int BRows, int BCols, int CRows, int CCols) {
double CValue = 0;
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ double As[TILE_DIM][TILE_DIM];
__shared__ double Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + ACols - 1)/TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < ACols && Row < ARows)
As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else
As[threadIdx.y][threadIdx.x] = 0.0;
if (k*TILE_DIM + threadIdx.y < BRows && Col < BCols)
Bs[threadIdx.y][threadIdx.x] = B[(k*TILE_DIM + threadIdx.y)*BCols + Col];
else
Bs[threadIdx.y][threadIdx.x] = 0.0;
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n)
CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
__syncthreads();
}
if (Row < CRows && Col < CCols)
C[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) +
(blockIdx.x * blockDim.x)+ threadIdx.x] = CValue;
}
void LaunchKernel(double *M, double *N, double *P, int size) {
// Create the streams
int n_stream = 4;
cudaStream_t stream[n_stream];
for(int i = 0; i < n_stream; i++){
cudaStreamCreate(&stream[i]);
}
// Split N into two column halves (left half N_3, right half N_4)
double *N_3 = (double *)malloc(DIM * DIM/2 *(sizeof(double)));
double *N_4 = (double *)malloc(DIM * DIM/2 *(sizeof(double)));
for(int i = 0; i < DIM; i++){
for(int j = 0; j < DIM; j++){
if(j < DIM/2)
N_3[i*DIM/2 + j]=N[i*DIM+j];
else
N_4[i*DIM/2 + (j-DIM/2)] = N[i*DIM + j];
}
}
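// The product C = M * N is computed as four independent quadrants:
// quadrant (r, c) of C = (row half r of M) * (column half c of N), one
// quadrant per stream, so the four uploads and kernels can overlap across
// streams (full copy/compute overlap would additionally need pinned host
// memory instead of plain malloc).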
// Allocate the device memory segments
size_t slice = DIM * (DIM/2);
double *d_M0, *d_N0, *d_P0;
double *d_M1, *d_N1, *d_P1;
double *d_M2, *d_N2, *d_P2;
double *d_M3, *d_N3, *d_P3;
cudaMalloc((void **)&d_M0, DIM * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_N0, DIM * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_P0, (DIM/2)* (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_M1, DIM * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_N1, DIM * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_P1, (DIM/2) * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_M2, DIM * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_N2, DIM * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_P2, (DIM/2) * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_M3, DIM * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_N3, DIM * (DIM/2) * sizeof(double));
cudaMalloc((void **)&d_P3, (DIM/2) * (DIM/2) * sizeof(double));
// Declare blockDim and gridDim
dim3 block(TILE_DIM, TILE_DIM, 1);
dim3 grid(ceil((double)DIM / block.x), ceil(((double)DIM/2)/block.y) , 1);
// Launch the kernels
std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
cudaMemcpyAsync(d_M0, M, slice * sizeof (double), cudaMemcpyHostToDevice, stream[0]);
cudaMemcpyAsync(d_N0, N_3, slice * sizeof (double), cudaMemcpyHostToDevice, stream[0]);
double *ker0 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
cudaMemcpyAsync(d_M1, M, slice * sizeof (double), cudaMemcpyHostToDevice, stream[1]);
cudaMemcpyAsync(d_N1, N_4, slice * sizeof (double), cudaMemcpyHostToDevice, stream[1]);
double *ker1 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
cudaMemcpyAsync(d_M2, M + slice, slice * sizeof (double), cudaMemcpyHostToDevice, stream[2]);
cudaMemcpyAsync(d_N2, N_3, slice * sizeof (double), cudaMemcpyHostToDevice, stream[2]);
double *ker2 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
cudaMemcpyAsync(d_M3, M + slice, slice * sizeof (double), cudaMemcpyHostToDevice, stream[3]);
cudaMemcpyAsync(d_N3, N_4, slice * sizeof (double), cudaMemcpyHostToDevice, stream[3]);
double *ker3 = (double *)malloc(DIM/2 * DIM/2 * sizeof(double));
MatMul<<<grid, block, block.x * block.y * sizeof(double), stream[0]>>>(d_M0, d_N0, d_P0, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
MatMul<<<grid, block, block.x * block.y * sizeof(double), stream[1]>>>(d_M1, d_N1, d_P1, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
MatMul<<<grid, block, block.x * block.y * sizeof(double), stream[2]>>>(d_M2, d_N2, d_P2, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
MatMul<<<grid, block, block.x * block.y * sizeof(double), stream[3]>>>(d_M3, d_N3, d_P3, DIM/2, DIM, DIM, DIM/2, DIM/2, DIM/2);
cudaDeviceSynchronize();
cudaMemcpyAsync(ker0, d_P0, DIM/2 * DIM/2 * sizeof (double), cudaMemcpyDeviceToHost, stream[0]);
cudaMemcpyAsync(ker1, d_P1, DIM/2 * DIM/2 * sizeof (double), cudaMemcpyDeviceToHost, stream[1]);
cudaMemcpyAsync(ker2, d_P2, DIM/2 * DIM/2 * sizeof (double), cudaMemcpyDeviceToHost, stream[2]);
cudaMemcpyAsync(ker3, d_P3, DIM/2 * DIM/2 * sizeof (double), cudaMemcpyDeviceToHost, stream[3]);
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
printf("%lf\n", tempo);
// Copy the submatrices into the final matrix
for(int i = 0; i < DIM; i++){
for(int j = 0; j<DIM ; j++){
if(i < DIM/2 && j < DIM/2)
P[i * DIM + j ] = ker0[i * DIM/2 + j];
else if(i < DIM/2 && j >= DIM/2)
P[i * DIM + j ] = ker1[i * DIM/2 + (j-DIM/2)];
else if(i >= DIM/2 && j < DIM/2)
P[i * DIM + j ] = ker2[(i-DIM/2) * DIM/2 + j];
else if(i >= DIM/2 && j >= DIM/2)
P[i * DIM + j ] = ker3[(i-DIM/2) * DIM/2 + (j-DIM/2)];
}
}
cudaFree(d_M0);
cudaFree(d_N0);
cudaFree(d_P0);
cudaFree(d_M1);
cudaFree(d_N1);
cudaFree(d_P1);
cudaFree(d_M2);
cudaFree(d_N2);
cudaFree(d_P2);
cudaFree(d_M3);
cudaFree(d_N3);
cudaFree(d_P3);
}
void MatrixMulHost(double *A, double *B, double *C) {
//std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
int c, d, k;
for (c = 0; c < DIM; c++) {
for (d = 0; d < DIM; d++) {
double Pvalue = 0;
for (k = 0; k < DIM; k++) {
Pvalue += A[c * DIM + k] * B[k * DIM + d];
}
C[c * DIM + d] = Pvalue;
}
}
//std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
//double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
//printf("TEMPO ELABORAZIONE SU HOST: %lf\n", tempo);
}
int main() {
double *A = (double *)malloc(DIM * DIM * sizeof(double));
double *B = (double *)malloc(DIM * DIM * sizeof(double));
double *C = (double *)malloc(DIM * DIM * sizeof(double));
double *C_H = (double *)malloc(DIM * DIM * sizeof(double));
populateMatrix(A);
populateMatrix(B);
std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
LaunchKernel(&A[0], &B[0], &C[0], DIM);
std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();
double tempo = std::chrono::duration_cast<std::chrono::duration<double> >(end - start).count();
}
|
0542945ddbdc8826e39001f8170efdb3147b220b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels.h"
/* This is the kernel function that performs the sum reduction.
* This one uses the most basic approach (called "interleaved indexing").
* Each thread adds a pair of elements right next to each other.
* The stride in the code below is the distance between the elements that each thread adds.
* This distance doubles on each iteration.
* Note that the number of threads required also halves on each iteration.
*
* Notes on kernel args: the arguments passed in for the arrays must be *device buffers* (not host buffers)!
* n is an integer that is passed in from the host when the kernel is launched.
* No cudaMemcpy is required to do this (args on the stack are copied).
*/
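/* Worked example: one block of 4 threads reducing 8 elements [a b c d e f g h].
 * stride 1: t0..t3 add pairs (0,1) (2,3) (4,5) (6,7) -> partials at indices 0,2,4,6
 * stride 2: t0,t1  add pairs (0,2) (4,6)             -> partials at indices 0,4
 * stride 4: t0     adds pair (0,4)                   -> full sum at index 0
 * Thread 0 then writes that sum to output[block_id].
 */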
__global__ void reduce(float *input, float *output, unsigned int n)
{
// Determine this thread's various ids
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
// The size of the chunk of data this thread's block is working on.
unsigned int chunk_size = block_size * 2;
// Calculate the index that this block's chunk of values starts at.
// Each thread adds 2 values, so each block adds a total of block_size * 2 values.
unsigned int block_start = block_id * chunk_size;
// Perform the reduction using "interleaved indexing" (each thread adds a pair of
// elements right next to each other).
// "stride" is the distance between the elements that each thread adds.
// This distance doubles on each iteration.
// The number of threads required halves on each iteration.
unsigned int left; // holds index of left operand
unsigned int right; // holds index or right operand
unsigned int threads = block_size;
for (unsigned int stride = 1; stride < chunk_size; stride *= 2, threads /= 2)
{
// There's a distance of stride between each pair of left and right operand indices,
// so there's a distance of stride * 2 between consecutive left indices
left = block_start + thread_id * (stride * 2);
right = left + stride;
if (thread_id < threads // read: "If this thread should be
// active on this iteration of the reduction."
&& right < n) // If we're the last block, we may be running more threads
// than we need - this condition makes sure they don't interfere.
{
input[left] += input[right];
}
// Each block may be running multiple warps. These warps may not all be in
// sync. The call below syncs the warps in the block at the end of each iteration
// so that the results are written to memory before the next iteration begins.
__syncthreads();
}
// Once the loop is done, the partial sum for this block will be in the leftmost index
// of this block's chunk. The code below causes each block's thread 0 to write that
// partial result to the output buffer at position "block_id". After the code
// below completes, the output buffer will contain exactly <number of blocks>
// consecutive partial results.
if (!thread_id)
{
output[block_id] = input[block_start];
}
}
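/* Editor's note: a minimal host-side driver sketch, not part of the original file.
 * It assumes d_in and d_out are device buffers and that 512 threads per block is
 * acceptable; each pass produces one partial sum per block until one value remains. */
static float reduce_to_scalar(float *d_in, float *d_out, unsigned int n)
{
    const unsigned int threads = 512;
    while (n > 1)
    {
        unsigned int chunk = threads * 2;              // values consumed per block
        unsigned int blocks = (n + chunk - 1) / chunk; // one partial sum per block
        hipLaunchKernelGGL(reduce, dim3(blocks), dim3(threads), 0, 0, d_in, d_out, n);
        float *tmp = d_in; d_in = d_out; d_out = tmp;  // partials feed the next pass
        n = blocks;
    }
    float result;
    hipMemcpy(&result, d_in, sizeof(float), hipMemcpyDeviceToHost);
    return result;
}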
| 0542945ddbdc8826e39001f8170efdb3147b220b.cu | #include "kernels.h"
/* This is the kernel function that performs the sum reduction.
 * This one uses the most basic approach (called "interleaved indexing").
* Each thread adds a pair of elements right next to each other.
* The stride in the code below is the distance between the elements that each thread adds.
* This distance doubles on each iteration.
* Note that the number of threads required also halves on each iteration.
*
* Notes on kernel args: the arguments passed in for the arrays must be *device buffers* (not host buffers)!
* n is an integer that is passed in from the host when the kernel is launched.
 * No cudaMemcpy is required to do this (args on the stack are copied).
*/
__global__ void reduce(float *input, float *output, unsigned int n)
{
// Determine this thread's various ids
unsigned int block_size = blockDim.x;
unsigned int thread_id = threadIdx.x;
unsigned int block_id = blockIdx.x;
// The size of the chunk of data this thread's block is working on.
unsigned int chunk_size = block_size * 2;
// Calculate the index that this block's chunk of values starts at.
// Each thread adds 2 values, so each block adds a total of block_size * 2 values.
unsigned int block_start = block_id * chunk_size;
// Perform the reduction using "interleaved indexing" (each thread adds a pair of
// elements right next to each other).
// "stride" is the distance between the elements that each thread adds.
// This distance doubles on each iteration.
// The number of threads required halves on each iteration.
unsigned int left; // holds index of left operand
unsigned int right; // holds index of right operand
unsigned int threads = block_size;
for (unsigned int stride = 1; stride < chunk_size; stride *= 2, threads /= 2)
{
// There's a distance of stride between each pair of left and right operand indices,
// so there's a distance of stride * 2 between consecutive left indices
left = block_start + thread_id * (stride * 2);
right = left + stride;
if (thread_id < threads // read: "If this thread should be
// active on this iteration of the reduction."
&& right < n) // If we're the last block, we may be running more threads
// than we need - this condition makes sure they don't interfere.
{
input[left] += input[right];
}
// Each block may be running multiple warps. These warps may not all be in
// sync. The call below syncs the warps in the block at the end of each iteration
// so that the results are written to memory before the next iteration begins.
__syncthreads();
}
// Once the loop is done, the partial sum for this block will be in the leftmost index
// of this block's chunk. The code below causes each block's thread 0 to write that
// partial result to the output buffer at position "block_id". After the code
// below completes, the output buffer will contain exactly <number of blocks>
// consecutive partial results.
if (!thread_id)
{
output[block_id] = input[block_start];
}
}
|
1cd096421d6385f38994d8014eccc6f18ee1b52c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "compositor_data.h"
#include "vector_math.h"
// Compositor kernel to copy the tiles in the tileBuffer into the final outputBuffer location.
extern "C" __global__ void compositor(CompositorData* args)
{
const unsigned int xLaunch = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int yLaunch = blockDim.y * blockIdx.y + threadIdx.y;
if (yLaunch < args->resolution.y)
{
// First calculate block coordinates of this launch index.
// That is the launch index divided by the tile dimensions. (No operator>>() on vectors?)
const unsigned int xBlock = xLaunch >> args->tileShift.x;
const unsigned int yBlock = yLaunch >> args->tileShift.y;
// Each device needs to start at a different column and each row should start with a different device.
const unsigned int xTile = xBlock * args->deviceCount + ((args->deviceIndex + yBlock) % args->deviceCount);
// The horizontal pixel coordinate is: tile coordinate * tile width + launch index % tile width.
const unsigned int xPixel = xTile * args->tileSize.x + (xLaunch & (args->tileSize.x - 1)); // tileSize needs to be power-of-two for this modulo operation.
if (xPixel < args->resolution.x)
{
const float4 *src = reinterpret_cast<float4*>(args->tileBuffer);
float4 *dst = reinterpret_cast<float4*>(args->outputBuffer);
// The src location needs to be calculated with the original launch width, because gridDim.x * blockDim.x might be different.
dst[yLaunch * args->resolution.x + xPixel] = src[yLaunch * args->launchWidth + xLaunch]; // Copy one float4 per launch index.
}
}
}
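/* Editor's note: a worked example, not part of the original file, of the
 * power-of-two trick the comments above rely on: for tileSize.x == 2^k,
 * x / tileSize.x == x >> k and x % tileSize.x == x & (tileSize.x - 1).
 * E.g. tileSize.x = 16 (k = 4), x = 37: 37 >> 4 = 2 (the quotient) and
 * 37 & 15 = 5 (the remainder), matching 37 / 16 and 37 % 16. tileShift is
 * presumably the host-side log2 of tileSize. */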
| 1cd096421d6385f38994d8014eccc6f18ee1b52c.cu | /*
* Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "compositor_data.h"
#include "vector_math.h"
// Compositor kernel to copy the tiles in the tileBuffer into the final outputBuffer location.
extern "C" __global__ void compositor(CompositorData* args)
{
const unsigned int xLaunch = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned int yLaunch = blockDim.y * blockIdx.y + threadIdx.y;
if (yLaunch < args->resolution.y)
{
// First calculate block coordinates of this launch index.
// That is the launch index divided by the tile dimensions. (No operator>>() on vectors?)
const unsigned int xBlock = xLaunch >> args->tileShift.x;
const unsigned int yBlock = yLaunch >> args->tileShift.y;
// Each device needs to start at a different column and each row should start with a different device.
const unsigned int xTile = xBlock * args->deviceCount + ((args->deviceIndex + yBlock) % args->deviceCount);
// The horizontal pixel coordinate is: tile coordinate * tile width + launch index % tile width.
const unsigned int xPixel = xTile * args->tileSize.x + (xLaunch & (args->tileSize.x - 1)); // tileSize needs to be power-of-two for this modulo operation.
if (xPixel < args->resolution.x)
{
const float4 *src = reinterpret_cast<float4*>(args->tileBuffer);
float4 *dst = reinterpret_cast<float4*>(args->outputBuffer);
// The src location needs to be calculated with the original launch width, because gridDim.x * blockDim.x might be different.
dst[yLaunch * args->resolution.x + xPixel] = src[yLaunch * args->launchWidth + xLaunch]; // Copy one float4 per launch index.
}
}
}
|
e16149607841e429168238ea70af0ed2f31115bd.hip | // !!! This is a file automatically generated by hipify!!!
/**
! nvcc RGB2Gray.cu -o main `pkg-config opencv --cflags --libs`
! ldd main // check which shared libraries are missing
! cp -r /usr/local/lib/libopencv* /lib // copy the libraries directly to /lib
*/
#include <iostream>
#include <hip/hip_runtime.h>
// #include <hip/hip_runtime.h>
// #include <hip/hip_runtime_api.h>
#include <cmath>
#include <ctime>
#include <opencv2/opencv.hpp>
#include "common/book.h"
// #include "common/image.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
/* global thread id: 1D block and 2D grid <<<(32,32),32>>> */
#define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x) // 2D grid,1D block
// #define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x+threadIdx.y*blockDim.x) // 2D grid,2D block
/* get block id: 2D grid */
#define get_bid() (blockIdx.x + blockIdx.y * gridDim.x)
/* thread id within each block */
// #define get_tid_per_block() (threadIdx.x+threadIdx.y*blockDim.x) // 2D block
#define get_tid_per_block() (threadIdx.x)
#define get_ptr(image) ((unsigned char*)image.data)
#define image_size(image) (image.cols * image.rows)
using namespace std;
using namespace cv;
typedef float FLOAT;
__device__
unsigned char FLOAT2uchar(FLOAT value)
{
if(value < 0)
value = 0;
else if(value > 255)
value = 255;
return (unsigned char)value;
//return saturate_cast<unsigned char>(value);
}
__global__
void split_channel(unsigned char *dev_img,unsigned char *dev_B,
unsigned char *dev_G,unsigned char *dev_R,const int height,const int width)
{
int idx=get_tid();
if (idx>=height*width) return;
// compute the image row and column from idx
//int row=idx/width;
//int col=idx%width;
// OpenCV stores channels in BGR order
dev_B[idx]=dev_img[idx*3+0];
dev_G[idx]=dev_img[idx*3+1];
dev_R[idx]=dev_img[idx*3+2];
}
__global__
void blur(unsigned char *dev_B,unsigned char *dev_B2,FLOAT *conv_kernel,
const int height,const int width,const int kernel_size)
{
int idx=get_tid();
if (idx>=height*width) return;
// compute the image row and column from idx
int row=idx/width;
int col=idx%width;
unsigned char img_value=0;
FLOAT tmp_value=0;
int cur_row=0;
int cur_col=0;
for(int i=0;i<kernel_size;++i)
{
for(int j=0;j<kernel_size;++j)
{
// find the pixel coordinate corresponding to the kernel's top-left offset
cur_row=row-kernel_size/2+i;
cur_col=col-kernel_size/2+j;
if(cur_row<0 || cur_col<0 || cur_row>=height || cur_col>=width)
{
img_value=0;
}
else
{
// map back to the corresponding global index
img_value=dev_B[cur_row*width+cur_col];
}
tmp_value+=img_value*conv_kernel[j+i*kernel_size]; // multiply by the matching kernel weight
}
}
// dev_B2[idx]=(unsigned char)tmp_value; // a plain cast is unsafe: negative values can wrap to 255
dev_B2[idx]=FLOAT2uchar(tmp_value);
}
__global__
void concat_channel(unsigned char *dev_B2,unsigned char *dev_G2,unsigned char *dev_R2,
unsigned char *dev_img,const int height,const int width)
{
int idx=get_tid();
if (idx>=height*width) return;
// compute the image row and column from idx
// int row=idx/width;
// int col=idx%width;
// OpenCV stores channels in BGR order
dev_img[idx*3+0]=dev_B2[idx];
dev_img[idx*3+1]=dev_G2[idx];
dev_img[idx*3+2]=dev_R2[idx];
}
int main(int argc,char* argv[])
{
mycout<<"blur \n"<<
"1BGR3\n"<<
"2\n"<<
"33\n"
": ./main xxxx.jpg xxxx.jpg"<<endl;
if(argc<3) return -1;
// open the image
Mat img = imread(argv[1], IMREAD_COLOR);
if (img.empty())
{
mycout <<"load image fail"<<endl;
return -1;
}
// convolution kernel
const int kernel_size=3;
FLOAT tmp_kernel[]={0,-1,0,-1,5,-1,0,-1,0};
FLOAT *conv_kernel=NULL;
// use unified memory (accessible from both CPU and GPU)
//HANDLE_ERROR(hipMallocManaged((void**)&conv_kernel,kernel_size*kernel_size*sizeof(FLOAT),
// hipMemAttachGlobal));
// conv_kernel=tmp_kernel;
HANDLE_ERROR( hipMalloc( (void**)&conv_kernel,kernel_size*kernel_size*sizeof(FLOAT) ) );
HANDLE_ERROR( hipMemcpy( conv_kernel,tmp_kernel,kernel_size*kernel_size*sizeof(FLOAT) ,hipMemcpyHostToDevice ) );
/**============ split the image into 3 channels on the GPU ============================*/
Mat B=Mat::zeros(img.size(),CV_8UC1);
Mat G=Mat::zeros(img.size(),CV_8UC1);
Mat R=Mat::zeros(img.size(),CV_8UC1);
// allocate GPU memory
unsigned char *dev_img=NULL,*dev_B=NULL,*dev_G=NULL,*dev_R=NULL;
HANDLE_ERROR( hipMalloc( (void**)&dev_img,image_size(img)*3 ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_B,image_size(img) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_G,image_size(img) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_R,image_size(img) ) );
HANDLE_ERROR( hipMemcpy( dev_img,img.data,image_size(img)*3 ,hipMemcpyHostToDevice ) );
// launch the GPU kernel
dim3 grid(img.rows,img.cols);
hipLaunchKernelGGL(( split_channel), dim3(grid),dim3(1), 0, 0, dev_img,dev_B,dev_G,dev_R,img.rows,img.cols);
/**============ blur each of the 3 channels ============================*/
unsigned char *dev_B2=NULL,*dev_G2=NULL,*dev_R2=NULL;
HANDLE_ERROR( hipMalloc( (void**)&dev_B2,image_size(img) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_G2,image_size(img) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_R2,image_size(img) ) );
// launch the GPU kernels
hipLaunchKernelGGL(( blur), dim3(grid),dim3(1), 0, 0, dev_B,dev_B2,conv_kernel,img.rows,img.cols,kernel_size);
hipLaunchKernelGGL(( blur), dim3(grid),dim3(1), 0, 0, dev_G,dev_G2,conv_kernel,img.rows,img.cols,kernel_size);
hipLaunchKernelGGL(( blur), dim3(grid),dim3(1), 0, 0, dev_R,dev_R2,conv_kernel,img.rows,img.cols,kernel_size);
/**============ merge the 3 channels ============================*/
unsigned char *dev_img2=NULL;
HANDLE_ERROR( hipMalloc( (void**)&dev_img2,image_size(img)*3 ) );
hipLaunchKernelGGL(( concat_channel), dim3(grid),dim3(1), 0, 0, dev_B2,dev_G2,dev_R2,dev_img2,img.rows,img.cols);
// GPU -->CPU
// create an empty Mat
Mat blurImg=Mat::zeros(img.size(),CV_8UC3);
HANDLE_ERROR( hipMemcpy( blurImg.data,dev_img2,image_size(img)*3,hipMemcpyDeviceToHost ) );
// save the result
imwrite(argv[2],blurImg);
// free memory
HANDLE_ERROR(hipFree(dev_img));
HANDLE_ERROR(hipFree(dev_B));
HANDLE_ERROR(hipFree(dev_G));
HANDLE_ERROR(hipFree(dev_R));
HANDLE_ERROR(hipFree(dev_img2));
HANDLE_ERROR(hipFree(dev_B2));
HANDLE_ERROR(hipFree(dev_G2));
HANDLE_ERROR(hipFree(dev_R2));
HANDLE_ERROR(hipFree(conv_kernel));
return 0;
}
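/* Editor's note: a hedged aside, not part of the original file. The launches above
 * use dim3 grid(img.rows, img.cols) with a single thread per block, i.e. one block
 * per pixel, which leaves most of each warp idle. Since the kernels index purely
 * through get_tid() and guard against idx >= height*width, a conventional 1D launch
 * with full blocks would also work; the helper below is an illustrative sketch,
 * not the author's code. */
static inline dim3 full_block_grid(int rows, int cols, int threads)
{
    return dim3((rows * cols + threads - 1) / threads); // round up to whole blocks
}
// e.g. hipLaunchKernelGGL(split_channel, full_block_grid(img.rows, img.cols, 256),
//                         dim3(256), 0, 0, dev_img, dev_B, dev_G, dev_R, img.rows, img.cols);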
| e16149607841e429168238ea70af0ed2f31115bd.cu | /**
! nvcc RGB2Gray.cu -o main `pkg-config opencv --cflags --libs`
! ldd main // check which shared libraries are missing
! cp -r /usr/local/lib/libopencv* /lib // copy the libraries directly to /lib
*/
#include <iostream>
#include <cuda.h>
// #include <cuda_runtime.h>
// #include <cuda_runtime_api.h>
#include <cmath>
#include <ctime>
#include <opencv2/opencv.hpp>
#include "common/book.h"
// #include "common/image.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
/* global thread id: 1D block and 2D grid <<<(32,32),32>>> */
#define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x) // 2D grid,1D block
// #define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x+threadIdx.y*blockDim.x) // 2D grid,2D block
/* get block id: 2D grid */
#define get_bid() (blockIdx.x + blockIdx.y * gridDim.x)
/* thread id within each block */
// #define get_tid_per_block() (threadIdx.x+threadIdx.y*blockDim.x) // 2D block
#define get_tid_per_block() (threadIdx.x)
#define get_ptr(image) ((unsigned char*)image.data)
#define image_size(image) (image.cols * image.rows)
using namespace std;
using namespace cv;
typedef float FLOAT;
__device__
unsigned char FLOAT2uchar(FLOAT value)
{
if(value < 0)
value = 0;
else if(value > 255)
value = 255;
return (unsigned char)value;
//return saturate_cast<unsigned char>(value);
}
__global__
void split_channel(unsigned char *dev_img,unsigned char *dev_B,
unsigned char *dev_G,unsigned char *dev_R,const int height,const int width)
{
int idx=get_tid();
if (idx>=height*width) return;
// compute the image row and column from idx
//int row=idx/width;
//int col=idx%width;
// OpenCV stores channels in BGR order
dev_B[idx]=dev_img[idx*3+0];
dev_G[idx]=dev_img[idx*3+1];
dev_R[idx]=dev_img[idx*3+2];
}
__global__
void blur(unsigned char *dev_B,unsigned char *dev_B2,FLOAT *conv_kernel,
const int height,const int width,const int kernel_size)
{
int idx=get_tid();
if (idx>=height*width) return;
// compute the image row and column from idx
int row=idx/width;
int col=idx%width;
unsigned char img_value=0;
FLOAT tmp_value=0;
int cur_row=0;
int cur_col=0;
for(int i=0;i<kernel_size;++i)
{
for(int j=0;j<kernel_size;++j)
{
// find the pixel coordinate corresponding to the kernel's top-left offset
cur_row=row-kernel_size/2+i;
cur_col=col-kernel_size/2+j;
if(cur_row<0 || cur_col<0 || cur_row>=height || cur_col>=width)
{
img_value=0;
}
else
{
// map back to the corresponding global index
img_value=dev_B[cur_row*width+cur_col];
}
tmp_value+=img_value*conv_kernel[j+i*kernel_size]; // multiply by the matching kernel weight
}
}
// dev_B2[idx]=(unsigned char)tmp_value; // a plain cast is unsafe: negative values can wrap to 255
dev_B2[idx]=FLOAT2uchar(tmp_value);
}
__global__
void concat_channel(unsigned char *dev_B2,unsigned char *dev_G2,unsigned char *dev_R2,
unsigned char *dev_img,const int height,const int width)
{
int idx=get_tid();
if (idx>=height*width) return;
// compute the image row and column from idx
// int row=idx/width;
// int col=idx%width;
// OpenCV stores channels in BGR order
dev_img[idx*3+0]=dev_B2[idx];
dev_img[idx*3+1]=dev_G2[idx];
dev_img[idx*3+2]=dev_R2[idx];
}
int main(int argc,char* argv[])
{
mycout<<"卷积实现blur \n"<<
"1、将BGR图像拆分成3个通道\n"<<
"2、每个通道分别做卷积\n"<<
"3、将3个通道合并得最终的结果\n"
"输入格式: ./main xxxx.jpg xxxx.jpg"<<endl;
if(argc<3) return -1;
// open the image
Mat img = imread(argv[1], IMREAD_COLOR);
if (img.empty())
{
mycout <<"load image fail"<<endl;
return -1;
}
// convolution kernel
const int kernel_size=3;
FLOAT tmp_kernel[]={0,-1,0,-1,5,-1,0,-1,0};
FLOAT *conv_kernel=NULL;
// use unified memory (accessible from both CPU and GPU)
//HANDLE_ERROR(cudaMallocManaged((void**)&conv_kernel,kernel_size*kernel_size*sizeof(FLOAT),
// cudaMemAttachGlobal));
// conv_kernel=tmp_kernel;
HANDLE_ERROR( cudaMalloc( (void**)&conv_kernel,kernel_size*kernel_size*sizeof(FLOAT) ) );
HANDLE_ERROR( cudaMemcpy( conv_kernel,tmp_kernel,kernel_size*kernel_size*sizeof(FLOAT) ,cudaMemcpyHostToDevice ) );
/**============ split the image into 3 channels on the GPU ============================*/
Mat B=Mat::zeros(img.size(),CV_8UC1);
Mat G=Mat::zeros(img.size(),CV_8UC1);
Mat R=Mat::zeros(img.size(),CV_8UC1);
// allocate GPU memory
unsigned char *dev_img=NULL,*dev_B=NULL,*dev_G=NULL,*dev_R=NULL;
HANDLE_ERROR( cudaMalloc( (void**)&dev_img,image_size(img)*3 ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_B,image_size(img) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_G,image_size(img) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_R,image_size(img) ) );
HANDLE_ERROR( cudaMemcpy( dev_img,img.data,image_size(img)*3 ,cudaMemcpyHostToDevice ) );
// launch the GPU kernel
dim3 grid(img.rows,img.cols);
split_channel<<<grid,1>>>(dev_img,dev_B,dev_G,dev_R,img.rows,img.cols);
/**============ blur each of the 3 channels ============================*/
unsigned char *dev_B2=NULL,*dev_G2=NULL,*dev_R2=NULL;
HANDLE_ERROR( cudaMalloc( (void**)&dev_B2,image_size(img) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_G2,image_size(img) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_R2,image_size(img) ) );
// launch the GPU kernels
blur<<<grid,1>>>(dev_B,dev_B2,conv_kernel,img.rows,img.cols,kernel_size);
blur<<<grid,1>>>(dev_G,dev_G2,conv_kernel,img.rows,img.cols,kernel_size);
blur<<<grid,1>>>(dev_R,dev_R2,conv_kernel,img.rows,img.cols,kernel_size);
/**============ merge the 3 channels ============================*/
unsigned char *dev_img2=NULL;
HANDLE_ERROR( cudaMalloc( (void**)&dev_img2,image_size(img)*3 ) );
concat_channel<<<grid,1>>>(dev_B2,dev_G2,dev_R2,dev_img2,img.rows,img.cols);
// GPU -->CPU
// create an empty Mat
Mat blurImg=Mat::zeros(img.size(),CV_8UC3);
HANDLE_ERROR( cudaMemcpy( blurImg.data,dev_img2,image_size(img)*3,cudaMemcpyDeviceToHost ) );
// 保存结果
imwrite(argv[2],blurImg);
// 释放内存
HANDLE_ERROR(cudaFree(dev_img));
HANDLE_ERROR(cudaFree(dev_B));
HANDLE_ERROR(cudaFree(dev_G));
HANDLE_ERROR(cudaFree(dev_R));
HANDLE_ERROR(cudaFree(dev_img2));
HANDLE_ERROR(cudaFree(dev_B2));
HANDLE_ERROR(cudaFree(dev_G2));
HANDLE_ERROR(cudaFree(dev_R2));
HANDLE_ERROR(cudaFree(conv_kernel));
return 0;
}
|
63ad22c379755f207564939f645d1c7cde9891db.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
__global__ void add(char *a, char *b,int size1,int size2,int *c) {
int id = blockIdx.x;
if(id+size2 > size1) return;
int i;
for(i=0;i<size2;i++){
if(a[id+i]!=b[i]) break;
}
if(i==size2 && (id==0 || a[id-1]==' ') && (id+size2 == size1 || a[id+size2]==' ')){
c[id] = 1;
return;
}
c[id] = 0;
}
int main4(void) {
char a[50],b[50];
char *d_a, *d_b;
int c[50]; int *d_c;
gets(a);
scanf("%s",b);
int size1 = sizeof(char)*strlen(a);
int size2 = sizeof(char)*strlen(b);
hipMalloc((void **)&d_a, size1);
hipMalloc((void **)&d_b, size2);
hipMalloc((void **)&d_c,sizeof(int)*size1);
hipMemcpy(d_a, a, size1, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size2, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( add), dim3(size1),dim3(1), 0, 0, d_a, d_b,size1,size2,d_c);
hipMemcpy(c,d_c, sizeof(int)*size1, hipMemcpyDeviceToHost);
int cs = 0;
for(int i=0;i<size1;i++) if(c[i]==1) cs++;
printf("%d",cs);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 63ad22c379755f207564939f645d1c7cde9891db.cu | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
__global__ void add(char *a, char *b,int size1,int size2,int *c) {
int id = blockIdx.x;
if(id+size2 > size1) return;
int i;
for(i=0;i<size2;i++){
if(a[id+i]!=b[i]) break;
}
if(i==size2 && (id==0 || a[id-1]==' ') && (id+size2 == size1 || a[id+size2]==' ')){
c[id] = 1;
return;
}
c[id] = 0;
}
int main4(void) {
char a[50],b[50];
char *d_a, *d_b;
int c[50]; int *d_c;
gets(a);
scanf("%s",b);
int size1 = sizeof(char)*strlen(a);
int size2 = sizeof(char)*strlen(b);
cudaMalloc((void **)&d_a, size1);
cudaMalloc((void **)&d_b, size2);
cudaMalloc((void **)&d_c,sizeof(int)*size1);
cudaMemcpy(d_a, a, size1, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size2, cudaMemcpyHostToDevice);
add<<<size1,1>>>(d_a, d_b,size1,size2,d_c);
cudaMemcpy(c,d_c, sizeof(int)*size1, cudaMemcpyDeviceToHost);
int cs = 0;
for(int i=0;i<size1;i++) if(c[i]==1) cs++;
printf("%d",cs);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
418f2bbeca240922bef8b202b8f9e557963e7592.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <time.h>
__global__ void kernel(int* count_d, float* randomnums)
{
int i;
double x,y,z;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
i = tid;
int xidx = 0, yidx = 0;
xidx = (i+i);
yidx = (xidx+1);
x = randomnums[xidx];
y = randomnums[yidx];
z = ((x*x)+(y*y));
if (z<=1)
count_d[tid] = 1;
else
count_d[tid] = 0;
}
void CUDAErrorCheck()
{
hipError_t error = hipGetLastError();
if (error != hipSuccess)
{
printf("CUDA error : %s (%d)\n", hipGetErrorString(error), error);
exit(0);
}
}
int main(int argc,char* argv[])
{
int niter = 100000;
float *randomnums;
double pi;
hipMalloc((void**)&randomnums, (2*niter)*sizeof(float));
// Use CuRand to generate an array of random numbers on the device
int status;
hiprandGenerator_t gen;
status = hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MRG32K3A);
status |= hiprandSetPseudoRandomGeneratorSeed(gen, 4294967296ULL^time(NULL));
status |= hiprandGenerateUniform(gen, randomnums, (2*niter));
status |= hiprandDestroyGenerator(gen);
if (status != HIPRAND_STATUS_SUCCESS)
{
printf("CuRand Failure\n");
exit(EXIT_FAILURE);
}
int threads = 1000;
int blocks = 100;
int* count_d;
int *count = (int*)malloc(blocks*threads*sizeof(int));
unsigned int reducedcount = 0;
hipMalloc((void**)&count_d, (blocks*threads)*sizeof(int));
CUDAErrorCheck();
//one point per thread
hipLaunchKernelGGL(( kernel) , dim3(blocks), dim3(threads), 0, 0, count_d, randomnums);
hipDeviceSynchronize();
CUDAErrorCheck();
hipMemcpy(count, count_d, blocks*threads*sizeof(int), hipMemcpyDeviceToHost);
int i = 0;
//reduce array into int
for(i = 0; i<niter; i++)
reducedcount += count[i];
hipFree(randomnums);
hipFree(count_d);
free(count);
pi = ((double)reducedcount/niter)*4.0;
printf("Pi: %f\n", pi);
return 0;
}
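/* Editor's note: a brief aside, not part of the original file. Each sample is a
 * Bernoulli trial with success probability pi/4, so the estimator
 * pi_hat = 4 * reducedcount / niter has standard error
 * 4 * sqrt((pi/4) * (1 - pi/4) / niter) ~= 1.64 / sqrt(niter);
 * with niter = 100000 that is roughly 0.005, which bounds the scatter to expect
 * in the printed value. */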
| 418f2bbeca240922bef8b202b8f9e557963e7592.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <time.h>
__global__ void kernel(int* count_d, float* randomnums)
{
int i;
double x,y,z;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
i = tid;
int xidx = 0, yidx = 0;
xidx = (i+i);
yidx = (xidx+1);
x = randomnums[xidx];
y = randomnums[yidx];
z = ((x*x)+(y*y));
if (z<=1)
count_d[tid] = 1;
else
count_d[tid] = 0;
}
void CUDAErrorCheck()
{
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("CUDA error : %s (%d)\n", cudaGetErrorString(error), error);
exit(0);
}
}
int main(int argc,char* argv[])
{
int niter = 100000;
float *randomnums;
double pi;
cudaMalloc((void**)&randomnums, (2*niter)*sizeof(float));
// Use CuRand to generate an array of random numbers on the device
int status;
curandGenerator_t gen;
status = curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MRG32K3A);
status |= curandSetPseudoRandomGeneratorSeed(gen, 4294967296ULL^time(NULL));
status |= curandGenerateUniform(gen, randomnums, (2*niter));
status |= curandDestroyGenerator(gen);
if (status != CURAND_STATUS_SUCCESS)
{
printf("CuRand Failure\n");
exit(EXIT_FAILURE);
}
int threads = 1000;
int blocks = 100;
int* count_d;
int *count = (int*)malloc(blocks*threads*sizeof(int));
unsigned int reducedcount = 0;
cudaMalloc((void**)&count_d, (blocks*threads)*sizeof(int));
CUDAErrorCheck();
//one point per thread
kernel <<<blocks, threads>>> (count_d, randomnums);
cudaDeviceSynchronize();
CUDAErrorCheck();
cudaMemcpy(count, count_d, blocks*threads*sizeof(int), cudaMemcpyDeviceToHost);
int i = 0;
//reduce array into int
for(i = 0; i<niter; i++)
reducedcount += count[i];
cudaFree(randomnums);
cudaFree(count_d);
free(count);
pi = ((double)reducedcount/niter)*4.0;
printf("Pi: %f\n", pi);
return 0;
}
|
7b1ed21dc2f59b1f06d72293acea9a3898e12e4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <cstring>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include "gloveparser.cuh"
float* parseFile(char* path, int& n, int& dimensions) {
FILE *fp;
float* list;
fp = fopen(path, "r");
if (fp == NULL) {
printf("Error while opening file: %s. \n", path);
exit(-1);
}
bool number_is_negative = false; //Checks whether it is a negative number.
int ch = 0;
int comma_counter = 0; //The placement of the next comma digit.
int index = 0; //The index in the matrix.
bool comma = false; //Whether a comma has been registered yet.
bool isID = true;
float x = 0;
char _n[256], d[256];
fgets(_n, sizeof(_n), fp);
fgets(d, sizeof(d), fp);
n = atoi(_n);
dimensions = atoi(d);
list = (float*)malloc(n * dimensions * sizeof(float));
while ((ch = fgetc(fp)) != EOF) {
while (ch != 10 && ch != EOF) {
if (ch == 32) {
if (isID) { //Just continue after this. Otherwise we will be adding an unnecessary 0.
isID = false;
}
else { //Add number.
if (number_is_negative) x = x * -1.0; //Check if negative.
//printf("Adding to list[%d] value %f \n", index, x);
list[index] = x;
//Reset values.
x = 0;
index++;
comma = false;
number_is_negative = false;
comma_counter = 0;
}
}
else if (isID) { // Is the number the ID.
//Id is omitted for now.
}
else if (isdigit(ch)) { //Is it a number.
if (comma) { //If comma, then compute the correct digit.
double digit = (ch - 48.0) / pow(10, comma_counter);
x = x + digit;
comma_counter++;
}
else { //Otherwise just assign number.
x = ch - 48.0;
}
}
else if (ch == 45) { //Set negative flag.
number_is_negative = true;
}
else if (ch == 46) { //Set comma flag.
comma = true;
comma_counter++;
}
ch = fgetc(fp); //Get next character.
}
x = 0;
comma = false;
comma_counter = 0;
isID = true;
}
fclose(fp);
return list;
} | 7b1ed21dc2f59b1f06d72293acea9a3898e12e4a.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstring>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include "gloveparser.cuh"
float* parseFile(char* path, int& n, int& dimensions) {
FILE *fp;
float* list;
fp = fopen(path, "r");
if (fp == NULL) {
printf("Error while opening file: %s. \n", path);
exit(-1);
}
bool number_is_negative = false; //Checks whether it is a negative number.
int ch = 0;
int comma_counter = 0; //The placement of the next comma digit.
int index = 0; //The index in the matrix.
bool comma = false; //Whether a comma has been registered yet.
bool isID = true;
float x = 0;
char _n[256], d[256];
fgets(_n, sizeof(_n), fp);
fgets(d, sizeof(d), fp);
n = atoi(_n);
dimensions = atoi(d);
list = (float*)malloc(n * dimensions * sizeof(float));
while ((ch = fgetc(fp)) != EOF) {
while (ch != 10 && ch != EOF) {
if (ch == 32) {
if (isID) { //Just continue after this. Otherwise we will be adding an unnecessary 0.
isID = false;
}
else { //Add number.
if (number_is_negative) x = x * -1.0; //Check if negative.
//printf("Adding to list[%d] value %f \n", index, x);
list[index] = x;
//Reset values.
x = 0;
index++;
comma = false;
number_is_negative = false;
comma_counter = 0;
}
}
else if (isID) { // Is the number the ID.
//Id is omitted for now.
}
else if (isdigit(ch)) { //Is it a number.
if (comma) { //If comma, then compute the correct digit.
double digit = (ch - 48.0) / pow(10, comma_counter);
x = x + digit;
comma_counter++;
}
else { //Otherwise just assign number.
x = ch - 48.0;
}
}
else if (ch == 45) { //Set negative flag.
number_is_negative = true;
}
else if (ch == 46) { //Set comma flag.
comma = true;
comma_counter++;
}
ch = fgetc(fp); //Get next character.
}
x = 0;
comma = false;
comma_counter = 0;
isID = true;
}
fclose(fp);
return list;
} |
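/* Editor's note: a minimal usage sketch, not part of the original file. parseFile
 * returns a malloc'd row-major n x dimensions float array and reports both sizes
 * through the reference parameters; the caller owns and must free the buffer.
 * The file name below is illustrative. */
// int n = 0, dims = 0;
// float *vectors = parseFile((char*)"glove.txt", n, dims);
// float v_ij = vectors[/*i*/0 * dims + /*j*/0]; // element (i, j) lives at i*dims + j
// free(vectors);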
2299fbac340f88b217a470b0a78f9243f8969903.hip | // !!! This is a file automatically generated by hipify!!!
#include "Solver.h"
using namespace std ;
/*
vector<double> Solver::solve3Diag(const vector <double> & lDiag, const vector <double> & diag, const vector <double> & uDiag,
const vector <double> & rHS) {
// --- Initialize cuSPARSE
hipsparseHandle_t handle; hipsparseCreate(&handle);
const int N =diag.size(); // --- Size of the linear system
// --- Lower diagonal, diagonal and upper diagonal of the system matrix
double *h_ld = (double*)malloc(N * sizeof(double));
double *h_d = (double*)malloc(N * sizeof(double));
double *h_ud = (double*)malloc(N * sizeof(double));
double *h_x = (double *)malloc(N * sizeof(double));
for (int k = 0; k < N ; k++) {
h_ld[k] =lDiag.at(k);
h_d[k] =diag.at(k);
h_ud[k] =uDiag.at(k);
h_x[k] =rHS.at(k);
}
// for (int k = 0; k < N; k++)
double *d_ld; hipMalloc(&d_ld, N * sizeof(double));
double *d_d; hipMalloc(&d_d, N * sizeof(double));
double *d_ud; hipMalloc(&d_ud, N * sizeof(double));
double *d_x; hipMalloc(&d_x, N * sizeof(double));
cout << "lower diagonal elements" << endl ;
for (int k=0; k<N; k++) printf("%f\n", h_ld[k]);
cout << "diagonal elements" << endl ;
for (int k=0; k<N; k++) {
cout << k <<", " <<h_d[k] << endl ;
}
cout << "upper diagonal elements" << endl ;
for (int k=0; k<N; k++) printf("%f\n", h_ud[k]);
cout << "RHS elements" << endl ;
for (int k=0; k<N; k++) printf("%f\n", h_x[k]);
hipMemcpy(d_ld, h_ld, N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_d, h_d, N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_ud, h_ud, N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_x, h_x, N * sizeof(double), hipMemcpyHostToDevice);
// --- Allocating and defining dense host and device data vectors
// h_x[0] = 100.0; h_x[1] = 200.0; h_x[2] = 400.0; h_x[3] = 500.0; h_x[4] = 300.0;
// --- Allocating the host and device side result vector
//double *h_y = (double *)malloc(N * sizeof(double));
//double *d_y; hipMalloc(&d_y, N * sizeof(double));
cusparseDgtsv(handle, N, 1, d_ld, d_d, d_ud, d_x, N);
hipMemcpy(h_x, d_x, N * sizeof(double), hipMemcpyDeviceToHost);
cout << "results" << endl ;
for (int k=0; k<N; k++) printf("%f\n", h_x[k]);
cout << "finished results" << endl ;
vector < double> ans ;
for (int k=0; k<N; k++) {
ans.push_back(h_x[k]);
}
return ans ;
}
*/
vector<double> Solver::SOR3DiagPeriodic(const vector <bool> & nodeIsActive, const vector <double> & lDiag, const vector <double> & diag, const vector <double> & uDiag,
const vector <double> & rHS,
const vector <int> & prevIndex,
const vector <int> & nextIndex,
vector<double> & firstGuess) {
const int maxIteration=2500 ;
const double beta=1.2 ;
vector <double> ans =firstGuess ;
vector<double> ansOld=firstGuess ;
double maxError=10000 ; // Just a big number to enter the while loop for the first time.
const int N =diag.size(); // --- Size of the linear system
//Simple iterative SOR solver
int numIterator=0 ;
while (maxError>1.0E-6 && numIterator<maxIteration) {
maxError=0 ;
numIterator ++ ;
for (int i=0; i<N ; i++) {
if (!nodeIsActive.at(i)) {
continue ;
}
ansOld.at(i)=ans.at(i) ;
ans.at(i)=beta*(rHS.at(i)-lDiag.at(i)*ans[prevIndex.at(i)]-uDiag.at(i)*ans[nextIndex.at(i)])/diag.at(i)+ (1-beta)*ansOld.at(i);
if ( abs (ansOld.at(i)-ans.at(i)) >maxError) {
maxError=abs (ansOld.at(i)-ans.at(i));
}
}
}
cout << "In SOR solver after " << numIterator <<" iteration, maximum difference in two successuve iterations is "<< maxError << endl ; ;
return ans;
}
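/* Editor's note: a short aside, not part of the original file. The update in the
 * loop above is standard SOR for a (periodic) tridiagonal system: with relaxation
 * factor beta,
 *
 *   x_i <- beta * (d_i - l_i * x_prev(i) - u_i * x_next(i)) / b_i + (1 - beta) * x_i
 *
 * beta = 1 reduces to Gauss-Seidel; 1 < beta < 2 (here beta = 1.2) over-relaxes,
 * which can speed convergence on suitable systems. */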
| 2299fbac340f88b217a470b0a78f9243f8969903.cu | #include "Solver.h"
using namespace std ;
/*
vector<double> Solver::solve3Diag(const vector <double> & lDiag, const vector <double> & diag, const vector <double> & uDiag,
const vector <double> & rHS) {
// --- Initialize cuSPARSE
cusparseHandle_t handle; cusparseCreate(&handle);
const int N =diag.size(); // --- Size of the linear system
// --- Lower diagonal, diagonal and upper diagonal of the system matrix
double *h_ld = (double*)malloc(N * sizeof(double));
double *h_d = (double*)malloc(N * sizeof(double));
double *h_ud = (double*)malloc(N * sizeof(double));
double *h_x = (double *)malloc(N * sizeof(double));
for (int k = 0; k < N ; k++) {
h_ld[k] =lDiag.at(k);
h_d[k] =diag.at(k);
h_ud[k] =uDiag.at(k);
h_x[k] =rHS.at(k);
}
// for (int k = 0; k < N; k++)
double *d_ld; cudaMalloc(&d_ld, N * sizeof(double));
double *d_d; cudaMalloc(&d_d, N * sizeof(double));
double *d_ud; cudaMalloc(&d_ud, N * sizeof(double));
double *d_x; cudaMalloc(&d_x, N * sizeof(double));
cout << "lower diagonal elements" << endl ;
for (int k=0; k<N; k++) printf("%f\n", h_ld[k]);
cout << "diagonal elements" << endl ;
for (int k=0; k<N; k++) {
cout << k <<", " <<h_d[k] << endl ;
}
cout << "upper diagonal elements" << endl ;
for (int k=0; k<N; k++) printf("%f\n", h_ud[k]);
cout << "RHS elements" << endl ;
for (int k=0; k<N; k++) printf("%f\n", h_x[k]);
cudaMemcpy(d_ld, h_ld, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_d, h_d, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_ud, h_ud, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, h_x, N * sizeof(double), cudaMemcpyHostToDevice);
// --- Allocating and defining dense host and device data vectors
// h_x[0] = 100.0; h_x[1] = 200.0; h_x[2] = 400.0; h_x[3] = 500.0; h_x[4] = 300.0;
// --- Allocating the host and device side result vector
//double *h_y = (double *)malloc(N * sizeof(double));
//double *d_y; cudaMalloc(&d_y, N * sizeof(double));
cusparseDgtsv(handle, N, 1, d_ld, d_d, d_ud, d_x, N);
cudaMemcpy(h_x, d_x, N * sizeof(double), cudaMemcpyDeviceToHost);
cout << "results" << endl ;
for (int k=0; k<N; k++) printf("%f\n", h_x[k]);
cout << "finished results" << endl ;
vector < double> ans ;
for (int k=0; k<N; k++) {
ans.push_back(h_x[k]);
}
return ans ;
}
*/
vector<double> Solver::SOR3DiagPeriodic(const vector <bool> & nodeIsActive, const vector <double> & lDiag, const vector <double> & diag, const vector <double> & uDiag,
const vector <double> & rHS,
const vector <int> & prevIndex,
const vector <int> & nextIndex,
vector<double> & firstGuess) {
const int maxIteration=2500 ;
const double beta=1.2 ;
vector <double> ans =firstGuess ;
vector<double> ansOld=firstGuess ;
double maxError=10000 ; // Just a big number to enter the while loop for the first time.
const int N =diag.size(); // --- Size of the linear system
//Simple iterative SOR solver
int numIterator=0 ;
while (maxError>1.0E-6 && numIterator<maxIteration) {
maxError=0 ;
numIterator ++ ;
for (int i=0; i<N ; i++) {
if (!nodeIsActive.at(i)) {
continue ;
}
ansOld.at(i)=ans.at(i) ;
ans.at(i)=beta*(rHS.at(i)-lDiag.at(i)*ans[prevIndex.at(i)]-uDiag.at(i)*ans[nextIndex.at(i)])/diag.at(i)+ (1-beta)*ansOld.at(i);
if ( abs (ansOld.at(i)-ans.at(i)) >maxError) {
maxError=abs (ansOld.at(i)-ans.at(i));
}
}
}
cout << "In SOR solver after " << numIterator <<" iteration, maximum difference in two successuve iterations is "<< maxError << endl ; ;
return ans;
}
|
3b8291f1b977eb846a324cfb6408cd862fcde496.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "replace.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *input_T = NULL;
hipMalloc(&input_T, XSIZE*YSIZE);
int *output_T = NULL;
hipMalloc(&output_T, XSIZE*YSIZE);
int *prefix_T = NULL;
hipMalloc(&prefix_T, XSIZE*YSIZE);
int *prefix_helper_T = NULL;
hipMalloc(&prefix_helper_T, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int k = 1;
int blockPower = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( replace), dim3(gridBlock),dim3(threadBlock), 0, 0, input_T,output_T,prefix_T,prefix_helper_T,n,k,blockPower);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
replace), dim3(gridBlock),dim3(threadBlock), 0, 0, input_T,output_T,prefix_T,prefix_helper_T,n,k,blockPower);
}
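// 10 warm-up launches above; the 1000 launches below are timed as a single
// interval (total microseconds). Note there is no device synchronize before
// 'end', so still-running kernels may not be fully counted.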
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
replace), dim3(gridBlock),dim3(threadBlock), 0, 0, input_T,output_T,prefix_T,prefix_helper_T,n,k,blockPower);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3b8291f1b977eb846a324cfb6408cd862fcde496.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "replace.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *input_T = NULL;
cudaMalloc(&input_T, XSIZE*YSIZE);
int *output_T = NULL;
cudaMalloc(&output_T, XSIZE*YSIZE);
int *prefix_T = NULL;
cudaMalloc(&prefix_T, XSIZE*YSIZE);
int *prefix_helper_T = NULL;
cudaMalloc(&prefix_helper_T, XSIZE*YSIZE);
int n = XSIZE*YSIZE;
int k = 1;
int blockPower = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
replace<<<gridBlock,threadBlock>>>(input_T,output_T,prefix_T,prefix_helper_T,n,k,blockPower);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
replace<<<gridBlock,threadBlock>>>(input_T,output_T,prefix_T,prefix_helper_T,n,k,blockPower);
}
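// 10 warm-up launches above; the 1000 launches below are timed as a single
// interval (total microseconds). Note there is no device synchronize before
// 'end', so still-running kernels may not be fully counted.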
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
replace<<<gridBlock,threadBlock>>>(input_T,output_T,prefix_T,prefix_helper_T,n,k,blockPower);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
b3ef67e23dfd8aa744997f1be7d9be81bee3444c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/add.cuh>
#include <random/rng.cuh>
#include "add_hip.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename InT, typename OutT = InT>
class AddTest : public ::testing::TestWithParam<AddInputs<InT, OutT>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AddInputs<InT, OutT>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.len;
CUDA_CHECK(hipStreamCreate(&stream));
allocate(in1, len);
allocate(in2, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in1, len, InT(-1.0), InT(1.0), stream);
r.uniform(in2, len, InT(-1.0), InT(1.0), stream);
naiveAddElem<InT, OutT>(out_ref, in1, in2, len);
add<InT, OutT>(out, in1, in2, len, stream);
}
void TearDown() override {
CUDA_CHECK(hipStreamSynchronize(stream));
CUDA_CHECK(hipFree(in1));
CUDA_CHECK(hipFree(in2));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipStreamDestroy(stream));
}
void compare() {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<OutT>(params.tolerance)));
}
protected:
AddInputs<InT, OutT> params;
InT *in1, *in2;
OutT *out_ref, *out;
hipStream_t stream;
};
const std::vector<AddInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 1234ULL},
{0.000001f, 1024 * 1024 + 2, 1234ULL},
{0.000001f, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float> AddTestF;
TEST_P(AddTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestF, ::testing::ValuesIn(inputsf));
const std::vector<AddInputs<double>> inputsd = {
{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<double> AddTestD;
TEST_P(AddTestD, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestD, ::testing::ValuesIn(inputsd));
const std::vector<AddInputs<float, double>> inputsfd = {
{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float, double> AddTestFD;
TEST_P(AddTestFD, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestFD, ::testing::ValuesIn(inputsfd));
} // end namespace LinAlg
} // end namespace MLCommon
| b3ef67e23dfd8aa744997f1be7d9be81bee3444c.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <linalg/add.cuh>
#include <random/rng.cuh>
#include "add.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename InT, typename OutT = InT>
class AddTest : public ::testing::TestWithParam<AddInputs<InT, OutT>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AddInputs<InT, OutT>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.len;
CUDA_CHECK(cudaStreamCreate(&stream));
allocate(in1, len);
allocate(in2, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in1, len, InT(-1.0), InT(1.0), stream);
r.uniform(in2, len, InT(-1.0), InT(1.0), stream);
naiveAddElem<InT, OutT>(out_ref, in1, in2, len);
add<InT, OutT>(out, in1, in2, len, stream);
}
void TearDown() override {
CUDA_CHECK(cudaStreamSynchronize(stream));
CUDA_CHECK(cudaFree(in1));
CUDA_CHECK(cudaFree(in2));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void compare() {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<OutT>(params.tolerance)));
}
protected:
AddInputs<InT, OutT> params;
InT *in1, *in2;
OutT *out_ref, *out;
cudaStream_t stream;
};
const std::vector<AddInputs<float>> inputsf = {
{0.000001f, 1024 * 1024, 1234ULL},
{0.000001f, 1024 * 1024 + 2, 1234ULL},
{0.000001f, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float> AddTestF;
TEST_P(AddTestF, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestF, ::testing::ValuesIn(inputsf));
const std::vector<AddInputs<double>> inputsd = {
{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<double> AddTestD;
TEST_P(AddTestD, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestD, ::testing::ValuesIn(inputsd));
const std::vector<AddInputs<float, double>> inputsfd = {
{0.00000001, 1024 * 1024, 1234ULL},
{0.00000001, 1024 * 1024 + 2, 1234ULL},
{0.00000001, 1024 * 1024 + 1, 1234ULL},
};
typedef AddTest<float, double> AddTestFD;
TEST_P(AddTestFD, Result) { compare(); }
INSTANTIATE_TEST_CASE_P(AddTests, AddTestFD, ::testing::ValuesIn(inputsfd));
} // end namespace LinAlg
} // end namespace MLCommon
|
57417948bbb895a33046352a45f600ce51e6ab60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matmul_kernel(float *C, float *A, float *B) {
__shared__ float sA[block_size_y*tile_size_y][block_size_x];
__shared__ float sB[block_size_y*tile_size_y][block_size_x * tile_size_x];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * block_size_x * tile_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y * tile_size_y + threadIdx.y;
int k, kb;
float sum[tile_size_y][tile_size_x];
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sum[i][j] = 0.0f;
}
}
for (k = 0; k < WIDTH; k += block_size_x) {
__syncthreads();
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
sA[ty + block_size_y * i][tx] = A[(y+i*block_size_y) * WIDTH + k + tx];
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sB[ty + block_size_y * i][tx + j * block_size_x] = B[(k + ty + block_size_y * i) * WIDTH + x + j * block_size_x];
}
}
__syncthreads();
//compute
#pragma unroll
for (kb = 0; kb < block_size_x; kb++) {
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sum[i][j] += sA[ty + block_size_y * i][kb] * sB[kb][tx + j * block_size_x];
}
}
}
}
//store result
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
C[y * WIDTH + x + block_size_y * i * WIDTH + j * block_size_x] = sum[i][j];
}
}
} | 57417948bbb895a33046352a45f600ce51e6ab60.cu | #include "includes.h"
__global__ void matmul_kernel(float *C, float *A, float *B) {
__shared__ float sA[block_size_y*tile_size_y][block_size_x];
__shared__ float sB[block_size_y*tile_size_y][block_size_x * tile_size_x];
int tx = threadIdx.x;
int ty = threadIdx.y;
int x = blockIdx.x * block_size_x * tile_size_x + threadIdx.x;
int y = blockIdx.y * block_size_y * tile_size_y + threadIdx.y;
int k, kb;
float sum[tile_size_y][tile_size_x];
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sum[i][j] = 0.0f;
}
}
for (k = 0; k < WIDTH; k += block_size_x) {
__syncthreads();
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
sA[ty + block_size_y * i][tx] = A[(y+i*block_size_y) * WIDTH + k + tx];
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sB[ty + block_size_y * i][tx + j * block_size_x] = B[(k + ty + block_size_y * i) * WIDTH + x + j * block_size_x];
}
}
__syncthreads();
//compute
#pragma unroll
for (kb = 0; kb < block_size_x; kb++) {
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
sum[i][j] += sA[ty + block_size_y * i][kb] * sB[kb][tx + j * block_size_x];
}
}
}
}
//store result
#pragma unroll
for (int i = 0; i < tile_size_y; i++) {
#pragma unroll
for (int j = 0; j < tile_size_x; j++) {
C[y * WIDTH + x + block_size_y * i * WIDTH + j * block_size_x] = sum[i][j];
}
}
} |
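/* Editor's note: a minimal host-side launch sketch, not part of the original file.
 * WIDTH, block_size_x/y and tile_size_x/y are compile-time constants assumed to be
 * defined in "includes.h", with WIDTH divisible by each block's tile extent. Each
 * block computes a (block_size_y * tile_size_y) x (block_size_x * tile_size_x)
 * tile of C, so the grid divides WIDTH by those extents: */
// dim3 threads(block_size_x, block_size_y);
// dim3 grid(WIDTH / (block_size_x * tile_size_x), WIDTH / (block_size_y * tile_size_y));
// matmul_kernel<<<grid, threads>>>(d_C, d_A, d_B); // d_C, d_A, d_B: device buffers (assumed)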
d5a26b1e44b9bc8f901983e2f486288ba8d6b9bc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace btv_l1_device
{
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf bacwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY);
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, hipStream_t stream);
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, hipStream_t stream);
void loadBtvWeights(const float* weights, size_t count);
template <int cn> void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
namespace btv_l1_device
{
__global__ void buildMotionMapsKernel(const PtrStepSzf forwardMotionX, const PtrStepf forwardMotionY,
PtrStepf backwardMotionX, PtrStepf backwardMotionY,
PtrStepf forwardMapX, PtrStepf forwardMapY,
PtrStepf backwardMapX, PtrStepf backwardMapY)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= forwardMotionX.cols || y >= forwardMotionX.rows)
return;
const float fx = forwardMotionX(y, x);
const float fy = forwardMotionY(y, x);
const float bx = backwardMotionX(y, x);
const float by = backwardMotionY(y, x);
forwardMapX(y, x) = x + bx;
forwardMapY(y, x) = y + by;
backwardMapX(y, x) = x + fx;
backwardMapY(y, x) = y + fy;
}
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf bacwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY)
{
const dim3 block(32, 8);
const dim3 grid(divUp(forwardMapX.cols, block.x), divUp(forwardMapX.rows, block.y));
hipLaunchKernelGGL(( buildMotionMapsKernel), dim3(grid), dim3(block), 0, 0, forwardMotionX, forwardMotionY,
backwardMotionX, bacwardMotionY,
forwardMapX, forwardMapY,
backwardMapX, backwardMapY);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
template <typename T>
__global__ void upscaleKernel(const PtrStepSz<T> src, PtrStep<T> dst, const int scale)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= src.cols || y >= src.rows)
return;
dst(y * scale, x * scale) = src(y, x);
}
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, hipStream_t stream)
{
typedef typename TypeVec<float, cn>::vec_type src_t;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( upscaleKernel<src_t>), dim3(grid), dim3(block), 0, stream, (PtrStepSz<src_t>) src, (PtrStepSz<src_t>) dst, scale);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void upscale<1>(const PtrStepSzb src, PtrStepSzb dst, int scale, hipStream_t stream);
template void upscale<3>(const PtrStepSzb src, PtrStepSzb dst, int scale, hipStream_t stream);
template void upscale<4>(const PtrStepSzb src, PtrStepSzb dst, int scale, hipStream_t stream);
__device__ __forceinline__ float diffSign(float a, float b)
{
return a > b ? 1.0f : a < b ? -1.0f : 0.0f;
}
__device__ __forceinline__ float3 diffSign(const float3& a, const float3& b)
{
return make_float3(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f
);
}
__device__ __forceinline__ float4 diffSign(const float4& a, const float4& b)
{
return make_float4(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f,
0.0f
);
}
struct DiffSign : binary_function<float, float, float>
{
__device__ __forceinline__ float operator ()(float a, float b) const
{
return diffSign(a, b);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<btv_l1_device::DiffSign> : DefaultTransformFunctorTraits<btv_l1_device::DiffSign>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
}}}
namespace btv_l1_device
{
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, hipStream_t stream)
{
transform(src1, src2, dst, DiffSign(), WithOutMask(), stream);
}
__constant__ float c_btvRegWeights[16*16];
template <typename T>
__global__ void calcBtvRegularizationKernel(const PtrStepSz<T> src, PtrStep<T> dst, const int ksize)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + ksize;
const int y = blockIdx.y * blockDim.y + threadIdx.y + ksize;
if (y >= src.rows - ksize || x >= src.cols - ksize)
return;
const T srcVal = src(y, x);
T dstVal = VecTraits<T>::all(0);
for (int m = 0, count = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++count)
dstVal = dstVal + c_btvRegWeights[count] * (diffSign(srcVal, src(y + m, x + l)) - diffSign(src(y - m, x - l), srcVal));
}
dst(y, x) = dstVal;
}
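// Editor's sketch: the weights consumed above are uploaded by loadBtvWeights
// below. The host-side filler here is NOT part of this file; it merely mirrors
// the kernel's (m, l) loop order, and the alpha^(|m|+|l|) decay is the usual
// bilateral-total-variation weighting from the super-resolution literature,
// assumed rather than read from this source. fillBtvWeightsSketch is a
// hypothetical name; weights must have room for every (m, l) pair, at most
// 16*16 entries to match c_btvRegWeights.
static void fillBtvWeightsSketch(float alpha, int ksize, float* weights)
{
    int count = 0;
    for (int m = 0; m <= ksize; ++m)
    {
        for (int l = ksize; l + m >= 0; --l, ++count)
        {
            // alpha^(m + |l|), computed with a plain loop to avoid extra includes
            int p = m + (l < 0 ? -l : l);
            float w = 1.0f;
            for (int i = 0; i < p; ++i)
                w *= alpha;
            weights[count] = w;
        }
    }
}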
void loadBtvWeights(const float* weights, size_t count)
{
cudaSafeCall( hipMemcpyToSymbol(c_btvRegWeights, weights, count * sizeof(float)) );
}
template <int cn>
void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize)
{
typedef typename TypeVec<float, cn>::vec_type src_t;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
hipLaunchKernelGGL(( calcBtvRegularizationKernel<src_t>), dim3(grid), dim3(block), 0, 0, (PtrStepSz<src_t>) src, (PtrStepSz<src_t>) dst, ksize);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
template void calcBtvRegularization<1>(PtrStepSzb src, PtrStepSzb dst, int ksize);
template void calcBtvRegularization<3>(PtrStepSzb src, PtrStepSzb dst, int ksize);
template void calcBtvRegularization<4>(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
#endif /* HAVE_OPENCV_GPU */
| d5a26b1e44b9bc8f901983e2f486288ba8d6b9bc.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_GPU) && !defined(DYNAMIC_CUDA_SUPPORT)
#include "opencv2/gpu/device/common.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "opencv2/gpu/device/vec_traits.hpp"
#include "opencv2/gpu/device/vec_math.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace btv_l1_device
{
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf backwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY);
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, cudaStream_t stream);
void loadBtvWeights(const float* weights, size_t count);
template <int cn> void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
namespace btv_l1_device
{
__global__ void buildMotionMapsKernel(const PtrStepSzf forwardMotionX, const PtrStepf forwardMotionY,
PtrStepf backwardMotionX, PtrStepf backwardMotionY,
PtrStepf forwardMapX, PtrStepf forwardMapY,
PtrStepf backwardMapX, PtrStepf backwardMapY)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= forwardMotionX.cols || y >= forwardMotionX.rows)
return;
const float fx = forwardMotionX(y, x);
const float fy = forwardMotionY(y, x);
const float bx = backwardMotionX(y, x);
const float by = backwardMotionY(y, x);
forwardMapX(y, x) = x + bx;
forwardMapY(y, x) = y + by;
backwardMapX(y, x) = x + fx;
backwardMapY(y, x) = y + fy;
}
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf backwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY)
{
const dim3 block(32, 8);
const dim3 grid(divUp(forwardMapX.cols, block.x), divUp(forwardMapX.rows, block.y));
buildMotionMapsKernel<<<grid, block>>>(forwardMotionX, forwardMotionY,
backwardMotionX, backwardMotionY,
forwardMapX, forwardMapY,
backwardMapX, backwardMapY);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T>
__global__ void upscaleKernel(const PtrStepSz<T> src, PtrStep<T> dst, const int scale)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= src.cols || y >= src.rows)
return;
dst(y * scale, x * scale) = src(y, x);
}
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream)
{
typedef typename TypeVec<float, cn>::vec_type src_t;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
upscaleKernel<src_t><<<grid, block, 0, stream>>>((PtrStepSz<src_t>) src, (PtrStepSz<src_t>) dst, scale);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void upscale<1>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
template void upscale<3>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
template void upscale<4>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
__device__ __forceinline__ float diffSign(float a, float b)
{
return a > b ? 1.0f : a < b ? -1.0f : 0.0f;
}
__device__ __forceinline__ float3 diffSign(const float3& a, const float3& b)
{
return make_float3(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f
);
}
__device__ __forceinline__ float4 diffSign(const float4& a, const float4& b)
{
return make_float4(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f,
0.0f
);
}
struct DiffSign : binary_function<float, float, float>
{
__device__ __forceinline__ float operator ()(float a, float b) const
{
return diffSign(a, b);
}
};
}
namespace cv { namespace gpu { namespace device
{
template <> struct TransformFunctorTraits<btv_l1_device::DiffSign> : DefaultTransformFunctorTraits<btv_l1_device::DiffSign>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
}}}
namespace btv_l1_device
{
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, cudaStream_t stream)
{
transform(src1, src2, dst, DiffSign(), WithOutMask(), stream);
}
__constant__ float c_btvRegWeights[16*16];
template <typename T>
__global__ void calcBtvRegularizationKernel(const PtrStepSz<T> src, PtrStep<T> dst, const int ksize)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + ksize;
const int y = blockIdx.y * blockDim.y + threadIdx.y + ksize;
if (y >= src.rows - ksize || x >= src.cols - ksize)
return;
const T srcVal = src(y, x);
T dstVal = VecTraits<T>::all(0);
for (int m = 0, count = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++count)
dstVal = dstVal + c_btvRegWeights[count] * (diffSign(srcVal, src(y + m, x + l)) - diffSign(src(y - m, x - l), srcVal));
}
dst(y, x) = dstVal;
}
void loadBtvWeights(const float* weights, size_t count)
{
cudaSafeCall( cudaMemcpyToSymbol(c_btvRegWeights, weights, count * sizeof(float)) );
}
template <int cn>
void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize)
{
typedef typename TypeVec<float, cn>::vec_type src_t;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
calcBtvRegularizationKernel<src_t><<<grid, block>>>((PtrStepSz<src_t>) src, (PtrStepSz<src_t>) dst, ksize);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template void calcBtvRegularization<1>(PtrStepSzb src, PtrStepSzb dst, int ksize);
template void calcBtvRegularization<3>(PtrStepSzb src, PtrStepSzb dst, int ksize);
template void calcBtvRegularization<4>(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
#endif /* HAVE_OPENCV_GPU */
|
4b110f5dd87d646701b9de4286499d8c01a9014a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void reduce(int * vector,int size,int pot){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int salto = pot/2;
while(salto){
if(idx<salto && idx+salto<size){
vector[idx]=vector[idx]+vector[idx+salto];
}
__syncthreads();
salto=salto/2;
}
return;
} | 4b110f5dd87d646701b9de4286499d8c01a9014a.cu | #include "includes.h"
__global__ void reduce(int * vector,int size,int pot){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int salto = pot/2;
while(salto){
if(idx<salto && idx+salto<size){
vector[idx]=vector[idx]+vector[idx+salto];
}
__syncthreads();
salto=salto/2;
}
return;
} |
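// Editor's sketch: __syncthreads() only synchronizes threads within a single
// block, so this kernel is only correct when launched with one block whose
// threads cover every pair. A hedged launch helper under that assumption
// (launchReduceSketch is a hypothetical name; pot/2 must not exceed the
// device's maximum block size, so size is limited to roughly 2048 here):
void launchReduceSketch(int* d_vector, int size)
{
    int pot = 1;
    while (pot < size) pot <<= 1;              // smallest power of two >= size
    int threads = pot / 2 > 0 ? pot / 2 : 1;   // one thread per pair
    reduce<<<1, threads>>>(d_vector, size, pot); // the sum ends up in d_vector[0]
}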
f7d73b80205c31abd03856daa32bd276830be570.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
#include<cstdlib> //for abs(x)
#include<stdio.h>
#include<math.h>
using namespace std;
__global__ void findMin(int* A,int* current_min,int* mutex,unsigned int n);
int main()
{
const int NUMBER_OF_ELEMENTS = 1024*1024*20;
int* hostA = (int*)malloc(NUMBER_OF_ELEMENTS*sizeof(int));
int* hostMin = (int*)malloc(sizeof(int));
*hostMin = 1230000;
srand(time(0));
int i;
//initialize host vector by random elements
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
int temp = rand() % 1230000;
if(temp<0){
temp*=-1;
temp = temp% 1230000;
}
else if(temp==0)
temp=34;
hostA[i] = temp;
}
int* deviceA,*deviceMin,*deviceMutex;
hipMalloc(&deviceA,NUMBER_OF_ELEMENTS*sizeof(int));
hipMalloc(&deviceMin,sizeof(int));
hipMalloc(&deviceMutex,sizeof(int));
hipMemcpy(deviceMin,hostMin,sizeof(int),hipMemcpyHostToDevice);
hipMemset(deviceMutex,0,sizeof(int));
hipMemcpy(deviceA,hostA,NUMBER_OF_ELEMENTS*sizeof(int),hipMemcpyHostToDevice);
//set up timing variables
float gpu_elapsed_time;
hipEvent_t gpu_start,gpu_stop;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_stop);
hipEventRecord(gpu_start,0);
hipLaunchKernelGGL(( findMin), dim3(256),dim3(256), 0, 0, deviceA,deviceMin,deviceMutex,NUMBER_OF_ELEMENTS);
hipDeviceSynchronize();
hipMemcpy(hostMin,deviceMin,sizeof(int),hipMemcpyDeviceToHost);
hipEventRecord(gpu_stop, 0);
hipEventSynchronize(gpu_stop);
hipEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
hipEventDestroy(gpu_start);
hipEventDestroy(gpu_stop);
cout<<"Answer by CUDA for MIN is = "<<*hostMin<<endl;
std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;
clock_t cpu_start = clock();
int minn = 1230000;
for(int i=0;i<NUMBER_OF_ELEMENTS;i++)
{
if(hostA[i]<minn)
minn = hostA[i];
}
clock_t cpu_stop = clock();
clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
cout<<"Expected min value is = "<<minn<<endl;
std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl;
hipFree(deviceA);
hipFree(deviceMin);
hipFree(deviceMutex);
free(hostA); // hostA/hostMin were allocated with malloc, so free them (not delete[])
free(hostMin);
return hipDeviceSynchronize();
}
__global__ void findMin(int* A,int* current_min,int* mutex,unsigned int n)
{
//printf("threadIdx.x = %d and blockIdx = %d and gridDim.x = %d\n",threadIdx.x,blockIdx.x,gridDim.x);
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ int cache[256];
int temp = 1230000;
while(index+offset<n)
{
//printf("A[i] = %d and current temp = %d\n",A[index+offset],temp);
temp = fminf(temp,A[index+offset]);
//printf("temp == %d\n",temp);
offset+=stride;
}
cache[threadIdx.x]=temp;
__syncthreads();
//reduction
//printf("blockDim.x = %d\n",blockDim.x/2);
unsigned int i=blockDim.x/2;
while(i!=0)
{
if(threadIdx.x<i)
{
cache[threadIdx.x] = fminf(cache[threadIdx.x],cache[threadIdx.x+i]);
}
__syncthreads();
i/=2;
}
if(threadIdx.x == 0)
{
while(atomicCAS(mutex,0,1)!=0); // acquire the spin lock
//printf("current_min before = %d\n",*current_min);
*current_min = fminf(*current_min,cache[0]);
//printf("current_min = %d\n",*current_min);
atomicExch(mutex,0); // release the lock; without these braces every thread raced on the update
}
}
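// Editor's sketch: since the values here are non-negative ints, the spin-lock
// update above can be replaced by a single atomicMin per block (hypothetical
// alternative, not part of the original program; assumes blockDim.x <= 256 to
// match the shared cache, and a power-of-two block size):
__global__ void findMinAtomicSketch(int* A, int* current_min, unsigned int n)
{
    __shared__ int cache[256];
    unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int stride = gridDim.x * blockDim.x;
    int temp = 1230000;
    for (unsigned int j = index; j < n; j += stride)
        temp = min(temp, A[j]);
    cache[threadIdx.x] = temp;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s != 0; s /= 2)
    {
        if (threadIdx.x < s)
            cache[threadIdx.x] = min(cache[threadIdx.x], cache[threadIdx.x + s]);
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicMin(current_min, cache[0]); // one atomic per block replaces the mutex
}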
| f7d73b80205c31abd03856daa32bd276830be570.cu | #include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
#include<cstdlib> //for abs(x)
#include<stdio.h>
#include<math.h>
using namespace std;
__global__ void findMin(int* A,int* current_min,int* mutex,unsigned int n);
int main()
{
const int NUMBER_OF_ELEMENTS = 1024*1024*20;
int* hostA = (int*)malloc(NUMBER_OF_ELEMENTS*sizeof(int));
int* hostMin = (int*)malloc(sizeof(int));
*hostMin = 1230000;
srand(time(0));
int i;
//initialize host vector by random elements
for(i=0;i<NUMBER_OF_ELEMENTS;i++)
{
int temp = rand() % 1230000;
if(temp<0){
temp*=-1;
temp = temp% 1230000;
}
else if(temp==0)
temp=34;
hostA[i] = temp;
}
int* deviceA,*deviceMin,*deviceMutex;
cudaMalloc(&deviceA,NUMBER_OF_ELEMENTS*sizeof(int));
cudaMalloc(&deviceMin,sizeof(int));
cudaMalloc(&deviceMutex,sizeof(int));
cudaMemcpy(deviceMin,hostMin,sizeof(int),cudaMemcpyHostToDevice);
cudaMemset(deviceMutex,0,sizeof(int));
cudaMemcpy(deviceA,hostA,NUMBER_OF_ELEMENTS*sizeof(int),cudaMemcpyHostToDevice);
//set up timing variables
float gpu_elapsed_time;
cudaEvent_t gpu_start,gpu_stop;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventRecord(gpu_start,0);
findMin<<<256,256>>>(deviceA,deviceMin,deviceMutex,NUMBER_OF_ELEMENTS);
cudaDeviceSynchronize();
cudaMemcpy(hostMin,deviceMin,sizeof(int),cudaMemcpyDeviceToHost);
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
cout<<"Answer by CUDA for MIN is = "<<*hostMin<<endl;
std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;
clock_t cpu_start = clock();
int minn = 1230000;
for(int i=0;i<NUMBER_OF_ELEMENTS;i++)
{
if(hostA[i]<minn)
minn = hostA[i];
}
clock_t cpu_stop = clock();
clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
cout<<"Expected min value is = "<<minn<<endl;
std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl;
cudaFree(deviceA);
cudaFree(deviceMin);
cudaFree(deviceMutex);
free(hostA); // hostA/hostMin were allocated with malloc, so free them (not delete[])
free(hostMin);
return cudaDeviceSynchronize();
}
__global__ void findMin(int* A,int* current_min,int* mutex,unsigned int n)
{
//printf("threadIdx.x = %d and blockIdx = %d and gridDim.x = %d\n",threadIdx.x,blockIdx.x,gridDim.x);
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ int cache[256];
int temp = 1230000;
while(index+offset<n)
{
//printf("A[i] = %d and current temp = %d\n",A[index+offset],temp);
temp = fminf(temp,A[index+offset]);
//printf("temp == %d\n",temp);
offset+=stride;
}
cache[threadIdx.x]=temp;
__syncthreads();
//reduction
//printf("blockDim.x = %d\n",blockDim.x/2);
unsigned int i=blockDim.x/2;
while(i!=0)
{
if(threadIdx.x<i)
{
cache[threadIdx.x] = fminf(cache[threadIdx.x],cache[threadIdx.x+i]);
}
__syncthreads();
i/=2;
}
if(threadIdx.x == 0)
{
while(atomicCAS(mutex,0,1)!=0); // acquire the spin lock
//printf("current_min before = %d\n",*current_min);
*current_min = fminf(*current_min,cache[0]);
//printf("current_min = %d\n",*current_min);
atomicExch(mutex,0); // release the lock; without these braces every thread raced on the update
}
}
|
c627793aa2380b9944de52fefe44b9f482c6fae3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//----------------------------------------------
// Sort N float elements (N=1~1025)
// The amount of memory used is SIZE bytes
// Only a single block is used
// The block size is N/2
//----------------------------------------------
#define N 1024
#define SIZE (N*sizeof(float))
#define GRID 1
#define BLOCK (N/2)
#define testLoop 1000 //number of loops used when benchmarking
//----------------------------------------------
// Swap function (usable from both host and kernel code,
// because it carries both the __host__ and __device__ qualifiers)
//----------------------------------------------
inline __host__ __device__ void swap(float& a, float& b){
float c=a;
a=b;
b=c;
}
//----------------------------------------------
// Bubble-sort kernel (sorts the N elements a->r in ascending order)
//----------------------------------------------
__global__ void bubble(float *r, float *a){
//*** blockDim=N/2 ***
int j=threadIdx.x; //j=0,1,2,...blockDim-1
int k=2*threadIdx.x; //k=0,2,4,...2*(blockDim-1) base index of each pair
//allocate shared memory
__shared__ float s[N+20];
//load the data into shared memory
__syncthreads(); //synchronize the threads to speed up loading (coalesced reads)
s[j]=a[j]; //all threads load the first half together (0~N/2-1)
s[j+N/2]=a[j+N/2]; //all threads load the second half together (N/2~N-1)
if(j==0){
// if N is odd, one extra tail element must be loaded, using only thread 0
s[N-1]=a[N-1];
}
//start the bubble sort
for(int loop=0; loop<=N/2; loop++){
// sort the 0-based pairs (0,1) (2,3) (4,5) ....
__syncthreads(); //synchronize to make sure shared memory has been written
if(s[k]>s[k+1]){
swap(s[k],s[k+1]);
}
// sort the 1-based pairs (1,2) (3,4) (5,6) ....
__syncthreads(); //synchronize to make sure shared memory has been written
if(s[k+1]>s[k+2]){
if(k<N-2) // when N is even, the last thread does nothing here
swap(s[k+1],s[k+2]);
}
}
//write the result back to global memory
__syncthreads();
r[j]=s[j];
r[j+N/2]=s[j+N/2];
if(j==0){
r[N-1]=s[N-1];
}
}
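// Editor's note: the loop above is an odd-even transposition sort; loop<=N/2
// gives N/2+1 iterations of two phases each, i.e. at least N alternating
// phases, which is the classical bound needed to guarantee that N elements
// are fully sorted.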
//----------------------------------------------
// Host version of the bubble sort
//----------------------------------------------
void bubble_host(float *r, float *a){
//copy the input data
for(int k=0; k<N; k++){
r[k]=a[k];
}
for(int loop=0; loop<=N/2; loop++){
// sort the 0-based pairs
for(int k=0; k<N-1; k+=2){
if(r[k]>r[k+1]){
swap(r[k],r[k+1]);
}
}
// sort the 1-based pairs
for(int k=1; k<N-1; k+=2){
if(r[k]>r[k+1]){
swap(r[k],r[k+1]);
}
}
}
}
//----------------------------------------------
// Main program
//----------------------------------------------
int main(){
//allocate host memory
float *a=(float*)malloc(SIZE);
float *b=(float*)malloc(SIZE);
float *c=(float*)malloc(SIZE);
//initialize
for(int k=0; k<N; k++){
a[k]=k;
c[k]=0;
}
//shuffle array a
srand(time(0));
for(int k=0; k<2*N; k++){
int i=rand()%N;
int j=rand()%N;
swap(a[i],a[j]);
}
//allocate device memory
float *ga, *gc;
hipMalloc((void**)&ga, SIZE);
hipMalloc((void**)&gc, SIZE);
//upload (upload c as well, simply to clear the device memory contents)
hipMemcpy(ga, a, SIZE, hipMemcpyHostToDevice);
hipMemcpy(gc, c, SIZE, hipMemcpyHostToDevice);
//benchmark the kernel
double t0=(double)clock()/CLOCKS_PER_SEC;
for(int k=0; k<testLoop; k++){
//launch the kernel (this is the single-block version)
hipLaunchKernelGGL(( bubble), dim3(1),dim3(BLOCK), 0, 0, gc,ga);
//synchronize, so an incorrect time is not measured before the work is done
hipDeviceSynchronize();
}
t0=((double)clock()/CLOCKS_PER_SEC-t0)/testLoop;
//benchmark the host version
double t1=(double)clock()/CLOCKS_PER_SEC;
for(int k=0; k<testLoop; k++){
bubble_host(b,a);
}
t1=((double)clock()/CLOCKS_PER_SEC-t1)/testLoop;
//print the computation times and compare them
printf("time[gpu]: %g ms\n",t0*1000);
printf("time[host]: %g ms\n",t1*1000);
printf("ratio: %g x\n",t1/t0);
//read back the device results
hipMemcpy(c, gc, SIZE, hipMemcpyDeviceToHost);
//check the correctness of the device results
printf("------------------------\n");
bool flag=true;
for(int k=0; k<N; k++){
if(c[k]!=k){
flag=false;
break;
}
}
printf("test[gpu]: %s\n",flag?"pass":"fail");
//check the correctness of the host results
flag=true;
for(int k=0; k<N; k++){
if(b[k]!=k){
flag=false;
break;
}
}
printf("test[host]: %s\n",flag?"pass":"fail");
//free the memory
hipFree(ga);
hipFree(gc);
free(a);
free(b);
free(c);
return 0;
}
| c627793aa2380b9944de52fefe44b9f482c6fae3.cu | #include <cuda.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//----------------------------------------------
// Sort N float elements (N=1~1025)
// The amount of memory used is SIZE bytes
// Only a single block is used
// The block size is N/2
//----------------------------------------------
#define N 1024
#define SIZE (N*sizeof(float))
#define GRID 1
#define BLOCK (N/2)
#define testLoop 1000 //number of loops used when benchmarking
//----------------------------------------------
// Swap function (usable from both host and kernel code,
// because it carries both the __host__ and __device__ qualifiers)
//----------------------------------------------
inline __host__ __device__ void swap(float& a, float& b){
float c=a;
a=b;
b=c;
}
//----------------------------------------------
// Bubble-sort kernel (sorts the N elements a->r in ascending order)
//----------------------------------------------
__global__ void bubble(float *r, float *a){
//*** blockDim=N/2 ***
int j=threadIdx.x; //j=0,1,2,...blockDim-1
int k=2*threadIdx.x; //k=0,2,4,...2*(blockDim-1) base index of each pair
//allocate shared memory
__shared__ float s[N+20];
//load the data into shared memory
__syncthreads(); //synchronize the threads to speed up loading (coalesced reads)
s[j]=a[j]; //all threads load the first half together (0~N/2-1)
s[j+N/2]=a[j+N/2]; //all threads load the second half together (N/2~N-1)
if(j==0){
// if N is odd, one extra tail element must be loaded, using only thread 0
s[N-1]=a[N-1];
}
//start the bubble sort
for(int loop=0; loop<=N/2; loop++){
// sort the 0-based pairs (0,1) (2,3) (4,5) ....
__syncthreads(); //synchronize to make sure shared memory has been written
if(s[k]>s[k+1]){
swap(s[k],s[k+1]);
}
// sort the 1-based pairs (1,2) (3,4) (5,6) ....
__syncthreads(); //synchronize to make sure shared memory has been written
if(s[k+1]>s[k+2]){
if(k<N-2) // when N is even, the last thread does nothing here
swap(s[k+1],s[k+2]);
}
}
//write the result back to global memory
__syncthreads();
r[j]=s[j];
r[j+N/2]=s[j+N/2];
if(j==0){
r[N-1]=s[N-1];
}
}
//----------------------------------------------
// Host version of the bubble sort
//----------------------------------------------
void bubble_host(float *r, float *a){
//copy the input data
for(int k=0; k<N; k++){
r[k]=a[k];
}
for(int loop=0; loop<=N/2; loop++){
// sort the 0-based pairs
for(int k=0; k<N-1; k+=2){
if(r[k]>r[k+1]){
swap(r[k],r[k+1]);
}
}
// sort the 1-based pairs
for(int k=1; k<N-1; k+=2){
if(r[k]>r[k+1]){
swap(r[k],r[k+1]);
}
}
}
}
//----------------------------------------------
// Main program
//----------------------------------------------
int main(){
//allocate host memory
float *a=(float*)malloc(SIZE);
float *b=(float*)malloc(SIZE);
float *c=(float*)malloc(SIZE);
//initialize
for(int k=0; k<N; k++){
a[k]=k;
c[k]=0;
}
//shuffle array a
srand(time(0));
for(int k=0; k<2*N; k++){
int i=rand()%N;
int j=rand()%N;
swap(a[i],a[j]);
}
//allocate device memory
float *ga, *gc;
cudaMalloc((void**)&ga, SIZE);
cudaMalloc((void**)&gc, SIZE);
//upload (upload c as well, simply to clear the device memory contents)
cudaMemcpy(ga, a, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(gc, c, SIZE, cudaMemcpyHostToDevice);
//benchmark the kernel
double t0=(double)clock()/CLOCKS_PER_SEC;
for(int k=0; k<testLoop; k++){
//launch the kernel (this is the single-block version)
bubble<<<1,BLOCK>>>(gc,ga);
//synchronize, so an incorrect time is not measured before the work is done
cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated; same behavior
}
t0=((double)clock()/CLOCKS_PER_SEC-t0)/testLoop;
//benchmark the host version
double t1=(double)clock()/CLOCKS_PER_SEC;
for(int k=0; k<testLoop; k++){
bubble_host(b,a);
}
t1=((double)clock()/CLOCKS_PER_SEC-t1)/testLoop;
//print the computation times and compare them
printf("time[gpu]: %g ms\n",t0*1000);
printf("time[host]: %g ms\n",t1*1000);
printf("ratio: %g x\n",t1/t0);
//read back the device results
cudaMemcpy(c, gc, SIZE, cudaMemcpyDeviceToHost);
//check the correctness of the device results
printf("------------------------\n");
bool flag=true;
for(int k=0; k<N; k++){
if(c[k]!=k){
flag=false;
break;
}
}
printf("test[gpu]: %s\n",flag?"pass":"fail");
//check the correctness of the host results
flag=true;
for(int k=0; k<N; k++){
if(b[k]!=k){
flag=false;
break;
}
}
printf("test[host]: %s\n",flag?"pass":"fail");
//free the memory
cudaFree(ga);
cudaFree(gc);
free(a);
free(b);
free(c);
return 0;
}
|
7fcca21a3a9852143b6c50505f22e7bd27a2c5df.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "hip/hip_fp16.h"
#include "cuda_fp16.hpp"
#include "hip/hip_runtime.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
#ifdef TESTPROGRESS16
float* tempBuffer=0;
float* tempWeight = 0;
int iMaxSize=0;
#endif
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = {iGiveSize,iOutSize};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum])
{
DecGenerateMemory(pMSize[cnum] * sizeof(half));
cuda_free_allType(publicMemory[cnum]);
}
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
#ifdef TESTPROGRESS16
if (iMaxSize < pMSize[cnum])
{
iMaxSize = pMSize[cnum];
if (tempBuffer) cuda_free(tempBuffer);
tempBuffer = cuda_make_array(0, iMaxSize);
if (tempWeight) cuda_free_allType(tempWeight);
tempWeight = cuda_make_array(0, iMaxSize);
}
#endif
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(hipPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(hipPeekAtLastError());
}
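// Editor's note: the f32<->f16 round trip above is lossy (half has a 10-bit
// mantissa), which is why the TESTPROGRESS16 path below keeps float scratch
// buffers and applies bias/activation in full precision before converting back.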
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(hipMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
LAYERDATA* layerdata = (LAYERDATA *)l->layerdata;
CONVPROP* prop=(CONVPROP *)layerdata->layerData;
if (prop->bUnSupportBias) return;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
}
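// Editor's note: after DealWeightBuffer runs, l->weights_gpu (and, unless
// bUnSupportBias is set, l->biases_gpu) actually point at half-precision
// buffers despite their float* type; later code must cast them back to half*,
// as the forward pass below does.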
#ifdef GPUHALFABILITY
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(hipPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hlt(b, half(-1.0f))) output[iOutDex] = half(-1.0f);
if (__hgt(b, half(1.0f))) output[iOutDex] = half(1.0f);
output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
#endif
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct,int bUnsportBias)
{
#ifdef GPUHALFABILITY
if (bUnsportBias) return;
if (bUnSupportAct)
{
add_bias_half_gpu(output, biases, batch, n, size);
return;
}
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(hipPeekAtLastError());
#endif
}
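// Editor's note: activations without a half kernel above (and biases flagged
// bUnSupportBias) are deliberately skipped here; the forward pass below
// re-applies them in float via add_bias_gpu/activate_array_ongpu once the
// output is back in float form.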
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
hipError_t stats = hipMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), hipMemcpyDeviceToDevice);
}*/
#endif
#ifdef TESTPROGRESS16
if (output == l.output_gpu)
{
hipMemcpy(publicMemory[1], l.output_gpu, l.outputs * sizeof(half), hipMemcpyDeviceToDevice);
}
cuda_convert_f16_to_f32((half*)publicMemory[1], l.outputs, tempBuffer);
//OutPutGPUMemory(l.output_gpu, l.outputs, 0);
cuda_convert_f16_to_f32((half*)l.biases_gpu, l.n, tempWeight);
add_bias_gpu(tempBuffer, tempWeight, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(tempBuffer, l.outputs * l.batch, l.activation);
//OutPutGPUMemory(l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16(tempBuffer, l.outputs, publicMemory[1]);
if (output == l.output_gpu)
{
hipMemcpy(l.output_gpu, publicMemory[1], l.outputs * sizeof(half),hipMemcpyDeviceToDevice);
}
#else
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate,prop->bUnSupportBias);
#endif
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
//if (prop->bUnSupportActivate) OutPutGPUMemory(l.output_gpu, l.outputs, 0);
#endif
if(prop->bUnSupportBias) add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
#ifdef MEMORYDEBUG
//if (prop->bUnSupportActivate) OutPutGPUMemory(l.output_gpu, l.outputs, 0);
#endif
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
} | 7fcca21a3a9852143b6c50505f22e7bd27a2c5df.cu | #include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "cuda_fp16.h"
#include "cuda_fp16.hpp"
#include "cuda.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
#ifdef TESTPROGRESS16
float* tempBuffer=0;
float* tempWeight = 0;
int iMaxSize=0;
#endif
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = {iGiveSize,iOutSize};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum])
{
DecGenerateMemory(pMSize[cnum] * sizeof(half));
cuda_free_allType(publicMemory[cnum]);
}
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
#ifdef TESTPROGRESS16
if (iMaxSize < pMSize[cnum])
{
iMaxSize = pMSize[cnum];
if (tempBuffer) cuda_free(tempBuffer);
tempBuffer = cuda_make_array(0, iMaxSize);
if (tempWeight) cuda_free_allType(tempWeight);
tempWeight = cuda_make_array(0, iMaxSize);
}
#endif
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(cudaPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(cudaPeekAtLastError());
}
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(cudaMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
LAYERDATA* layerdata = (LAYERDATA *)l->layerdata;
CONVPROP* prop=(CONVPROP *)layerdata->layerData;
if (prop->bUnSupportBias) return;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
}
#ifdef GPUHALFABILITY
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hlt(b, half(-1.0f))) output[iOutDex] = half(-1.0f);
if (__hgt(b, half(1.0f))) output[iOutDex] = half(1.0f);
output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
#endif
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct,int bUnsportBias)
{
#ifdef GPUHALFABILITY
if (bUnsportBias) return;
if (bUnSupportAct)
{
add_bias_half_gpu(output, biases, batch, n, size);
return;
}
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(cudaPeekAtLastError());
#endif
}
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
cudaError_t stats = cudaMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), cudaMemcpyDeviceToDevice);
}*/
#endif
#ifdef TESTPROGRESS16
if (output == l.output_gpu)
{
cudaMemcpy(publicMemory[1], l.output_gpu, l.outputs * sizeof(half), cudaMemcpyDeviceToDevice);
}
cuda_convert_f16_to_f32((half*)publicMemory[1], l.outputs, tempBuffer);
//OutPutGPUMemory(l.output_gpu, l.outputs, 0);
cuda_convert_f16_to_f32((half*)l.biases_gpu, l.n, tempWeight);
add_bias_gpu(tempBuffer, tempWeight, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(tempBuffer, l.outputs * l.batch, l.activation);
//OutPutGPUMemory(l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16(tempBuffer, l.outputs, publicMemory[1]);
if (output == l.output_gpu)
{
cudaMemcpy(l.output_gpu, publicMemory[1], l.outputs * sizeof(half),cudaMemcpyDeviceToDevice);
}
#else
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate,prop->bUnSupportBias);
#endif
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
//if (prop->bUnSupportActivate) OutPutGPUMemory(l.output_gpu, l.outputs, 0);
#endif
if(prop->bUnSupportBias) add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
#ifdef MEMORYDEBUG
//if (prop->bUnSupportActivate) OutPutGPUMemory(l.output_gpu, l.outputs, 0);
#endif
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
} |
3f31dcc32349d89e6c4ab838b78ca552abd08bb1.hip | // !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <algorithm>
#include "dense_nova.h"
#include "plugin.h"
#include <NvInfer.h>
#include <assert.h>
#include <hipcub/hipcub.hpp>
#include <iostream>
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
namespace NAMESPACE
{
extern "C" __global__ void ScatterFP16(const __half *features_rw, const int *indices_rw, __half *output_rw,
int spatialShape0, int spatialShape1, int spatialShape2,
int num_voxels, int num_features)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = idx; i < num_voxels; i += stride)
{
int4 coor = reinterpret_cast<const int4*>(indices_rw)[i];
int output_vol = spatialShape0 * spatialShape1 * spatialShape2;
//skip voxels still holding the initial value of -1, or with out-of-range coordinates
if(coor.x < 0 || coor.y < 0 || coor.z < 0 || coor.w < 0 || coor.y >= spatialShape0 || coor.z >= spatialShape1 || coor.w >= spatialShape2) continue;
// out shape: (bs, c, x, y, z)
__half *outPerBatch = output_rw + coor.x * num_features * output_vol;
int offset = coor.y * spatialShape1 * spatialShape2 + coor.z * spatialShape2 + coor.w;
for(int j = 0; j < num_features; ++j)
outPerBatch[j * output_vol + offset] = features_rw[i * num_features + j];
}
}
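// Editor's note: indices_rw holds one int4 per voxel, read above as
// (batch, x, y, z); each voxel's feature vector is scattered into a dense
// (bs, c, x, y, z) grid, so element (b, c, x, y, z) lands at
// b*C*V + c*V + x*S1*S2 + y*S2 + z, with V = S0*S1*S2 the spatial volume.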
void cuda_scatter_fp16(const __half *features_rw, const int *indices_rw, __half *output_rw, std::vector<int> spatialShape_rw,
int num_voxels, int num_features)
{
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
// maximum occupancy for a full device launch
checkCudaErrors(hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, ScatterFP16));
minGridSize = ::min(minGridSize, DivUp(num_voxels, blockSize));
hipLaunchKernelGGL(( ScatterFP16), dim3(minGridSize), dim3(blockSize), 0, 0, features_rw, indices_rw, output_rw, spatialShape_rw[0], spatialShape_rw[1], spatialShape_rw[2], num_voxels, num_features);
hipDeviceSynchronize();
}
} //namespace | 3f31dcc32349d89e6c4ab838b78ca552abd08bb1.cu | #include <assert.h>
#include <cuda.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <algorithm>
#include "dense_nova.h"
#include "plugin.h"
#include <NvInfer.h>
#include <assert.h>
#include <cub/cub.cuh>
#include <iostream>
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
namespace NAMESPACE
{
extern "C" __global__ void ScatterFP16(const __half *features_rw, const int *indices_rw, __half *output_rw,
int spatialShape0, int spatialShape1, int spatialShape2,
int num_voxels, int num_features)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = idx; i < num_voxels; i += stride)
{
int4 coor = reinterpret_cast<const int4*>(indices_rw)[i];
int output_vol = spatialShape0 * spatialShape1 * spatialShape2;
//skip voxels still holding the initial value of -1, or with out-of-range coordinates
if(coor.x < 0 || coor.y < 0 || coor.z < 0 || coor.w < 0 || coor.y >= spatialShape0 || coor.z >= spatialShape1 || coor.w >= spatialShape2) continue;
// out shape: (bs, c, x, y, z)
__half *outPerBatch = output_rw + coor.x * num_features * output_vol;
int offset = coor.y * spatialShape1 * spatialShape2 + coor.z * spatialShape2 + coor.w;
for(int j = 0; j < num_features; ++j)
outPerBatch[j * output_vol + offset] = features_rw[i * num_features + j];
}
}
void cuda_scatter_fp16(const __half *features_rw, const int *indices_rw, __half *output_rw, std::vector<int> spatialShape_rw,
int num_voxels, int num_features)
{
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the
// maximum occupancy for a full device launch
checkCudaErrors(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, ScatterFP16));
minGridSize = std::min(minGridSize, DivUp(num_voxels, blockSize));
ScatterFP16<<<minGridSize, blockSize>>>(features_rw, indices_rw, output_rw, spatialShape_rw[0], spatialShape_rw[1], spatialShape_rw[2], num_voxels, num_features);
cudaDeviceSynchronize();
}
} //namespace |
aaf0d5e69526a2533320a2902e33b0ae6f7cd1bd.hip | // !!! This is a file automatically generated by hipify!!!
/*
** Parallel Algorithms Project
** Matrix Multiplication
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime_api.h>
#define TAM_BLOCO 16
__global__ void cuda_multiplicarmatriz(float* M, float* N, float* R, int tamM, int tamN) {
//block index
int bx = blockIdx.x;
int by = blockIdx.y;
// thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of M processed by the block
int mComeco = tamM * TAM_BLOCO * by;
// Index of the last sub-matrix of M processed by the block
int mFim = mComeco + tamM - 1;
// Step size used to iterate through the sub-matrices of M
int mPasso = TAM_BLOCO;
// Index of the first sub-matrix of N processed by the block
int nComeco = TAM_BLOCO * bx;
// Step size used to iterate through the sub-matrices of N
int nPasso = TAM_BLOCO * tamN;
// The element computed by this thread
float rRes = 0;
// Sweep over all the sub-matrices of M and N required
// to compute the sub-matrix block
for (int m = mComeco, n = nComeco; m <= mFim; m += mPasso, n += nPasso) {
// Shared memory for the sub-matrix of M
__shared__ float Msub[TAM_BLOCO][TAM_BLOCO];
// Shared memory for the sub-matrix of N
__shared__ float Nsub[TAM_BLOCO][TAM_BLOCO];
// Load the matrices from global memory into shared
// memory. Each thread loads one element of each
// matrix
Msub[ty][tx] = M[m + tamM * ty + tx];
Nsub[ty][tx] = N[n + tamN * ty + tx];
// Synchronize to make sure all the matrices have been
// loaded
__syncthreads();
// Multiply the two matrices.
// Each thread computes one element
// of the sub-matrix block
for (int i = 0; i < TAM_BLOCO; ++i)
rRes += Msub[ty][i] * Nsub[i][tx];
// Synchronize to make sure the multiplication has been
// computed before loading two new sub-matrices of
// M and N in the next iteration
__syncthreads();
}
// Write the sub-matrix block to global memory
// Each thread writes a single element
int r = tamN * TAM_BLOCO * by + TAM_BLOCO * bx;
R[r + tamN * ty + tx] = rRes;
}
// Function called from the CPU
// Computes R = M * N
// aM is the height of M
// lM is the width of M
// lN is the width of N
void multiplicar(const float* M, const float* N, float* R, int aM, int lM, int lN) {
int tam;
// Copy M and N to the GPU
float* Md;
tam = aM * lM * sizeof(float);
hipMalloc((void**)&Md, tam);
hipMemcpy(Md, M, tam, hipMemcpyHostToDevice);
float* Nd;
tam = lM * lN * sizeof(float);
hipMalloc((void**)&Nd, tam);
hipMemcpy(Nd, N, tam, hipMemcpyHostToDevice);
// Allocate R on the GPU
float* Rd;
tam = aM * lN * sizeof(float);
hipMalloc((void**)&Rd, tam);
// Compute the launch configuration, assuming that
// the matrix dimensions are multiples of TAM_BLOCO
dim3 dimBlock(TAM_BLOCO, TAM_BLOCO);
dim3 dimGrid(lN / dimBlock.x, aM / dimBlock.y);
// Run the computation on the GPU
hipLaunchKernelGGL(( cuda_multiplicarmatriz), dim3(dimGrid), dim3(dimBlock), 0, 0, Md, Nd, Rd, lM, lN);
// Copy R back from the GPU
hipMemcpy(R, Rd, tam, hipMemcpyDeviceToHost);
// Free the GPU memory
hipFree(Md);
hipFree(Nd);
hipFree(Rd);
}
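// Editor's note: the grid above assumes aM and lN are exact multiples of
// TAM_BLOCO (as the comment in multiplicar states); for arbitrary sizes one
// would use ceiling division instead, e.g.
// dim3 dimGrid((lN + TAM_BLOCO - 1) / TAM_BLOCO, (aM + TAM_BLOCO - 1) / TAM_BLOCO);
// and guard out-of-range loads/stores inside the kernel.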
int checkGpu() {
int count;
hipError_t erro;
hipProfilerStart();
erro = hipGetDeviceCount(&count);
if (erro != hipSuccess) {
printf("Erro: %s\n", hipGetErrorString(erro));
return 0;
}
if (count < 1) {
printf("Erro: %s\n", "Este computador no possui um dispositivo com GPU compatvel com CUDA disponvel.");
return 0;
}
return 1;
}
void matriz_preencher(float* A, int tam) {
for (int i = 0; i < tam*tam; i++)
A[i] = (float)(rand() % 100);
}
void matriz_exibir(float* A, int tam) {
for (int i = 0; i < tam; i++) {
for (int j = 0; j < tam; j++) {
printf("%0.2f ", A[tam*i+j]);
}
printf("\n");
}
}
int main(int argc, const char * argv[]){
float A[TAM_BLOCO*TAM_BLOCO];
float B[TAM_BLOCO*TAM_BLOCO];
float C[TAM_BLOCO*TAM_BLOCO];
int aA = TAM_BLOCO;
int lA = TAM_BLOCO;
int lB = TAM_BLOCO;
if (!checkGpu())
exit(EXIT_FAILURE);
srand(time(NULL));
matriz_preencher(A, aA);
matriz_preencher(B, aA);
printf("Matriz A\n");
matriz_exibir(A, aA);
printf("Matriz B\n");
matriz_exibir(B, aA);
multiplicar(A, B, C, aA, lA, lB);
printf("RESULTADO\n");
matriz_exibir(C, aA);
hipDeviceReset();
exit(EXIT_SUCCESS);
} | aaf0d5e69526a2533320a2902e33b0ae6f7cd1bd.cu | /*
** Parallel Algorithms Project
** Matrix Multiplication
*/
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda_profiler_api.h>
#define TAM_BLOCO 16
__global__ void cuda_multiplicarmatriz(float* M, float* N, float* R, int tamM, int tamN) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of M processed by the block
int mComeco = tamM * TAM_BLOCO * by;
// Index of the last sub-matrix of M processed by the block
int mFim = mComeco + tamM - 1;
// Step size used to iterate through the sub-matrices of M
int mPasso = TAM_BLOCO;
// Index of the first sub-matrix of N processed by the block
int nComeco = TAM_BLOCO * bx;
// Step size used to iterate through the sub-matrices of N
int nPasso = TAM_BLOCO * tamN;
// The element computed by this thread
float rRes = 0;
// Loop over all the sub-matrices of M and N required
// to compute the sub-matrix block
for (int m = mComeco, n = nComeco; m <= mFim; m += mPasso, n += nPasso) {
// Shared memory for the sub-matrix of M
__shared__ float Msub[TAM_BLOCO][TAM_BLOCO];
// Shared memory for the sub-matrix of N
__shared__ float Nsub[TAM_BLOCO][TAM_BLOCO];
// Load the matrices from global memory into shared
// memory. Each thread loads one element of each
// matrix
Msub[ty][tx] = M[m + tamM * ty + tx];
Nsub[ty][tx] = N[n + tamN * ty + tx];
// Synchronize to make sure both matrices have been
// loaded
__syncthreads();
// Multiply the two matrices.
// Each thread computes one element
// of the sub-matrix block
for (int i = 0; i < TAM_BLOCO; ++i)
rRes += Msub[ty][i] * Nsub[i][tx];
// Synchronize to make sure the multiplication is
// done before loading two new sub-matrices of
// M and N in the next iteration
__syncthreads();
}
// Write the sub-matrix block to global memory.
// Each thread writes a single element
int r = tamN * TAM_BLOCO * by + TAM_BLOCO * bx;
R[r + tamN * ty + tx] = rRes;
}
// Function that runs on the CPU
// Computes R = M * N
// aM is the height of M
// lM is the width of M
// lN is the width of N
void multiplicar(const float* M, const float* N, float* R, int aM, int lM, int lN) {
int tam;
// Load M and N onto the GPU
float* Md;
tam = aM * lM * sizeof(float);
cudaMalloc((void**)&Md, tam);
cudaMemcpy(Md, M, tam, cudaMemcpyHostToDevice);
float* Nd;
tam = lM * lN * sizeof(float);
cudaMalloc((void**)&Nd, tam);
cudaMemcpy(Nd, N, tam, cudaMemcpyHostToDevice);
// Allocate R on the GPU
float* Rd;
tam = aM * lN * sizeof(float);
cudaMalloc((void**)&Rd, tam);
// Compute the execution configuration assuming that
// the matrix dimensions are multiples of TAM_BLOCO
dim3 dimBlock(TAM_BLOCO, TAM_BLOCO);
dim3 dimGrid(lN / dimBlock.x, aM / dimBlock.y);
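// Note: this only works because main() uses TAM_BLOCO-sized matrices. A
// hypothetical variant for arbitrary sizes would round the grid up, e.g.
//   dim3 dimGrid((lN + TAM_BLOCO - 1) / TAM_BLOCO, (aM + TAM_BLOCO - 1) / TAM_BLOCO);
// and add bounds checks to the kernel's global loads and stores.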
// Run the computation on the GPU
cuda_multiplicarmatriz<<<dimGrid, dimBlock>>>(Md, Nd, Rd, lM, lN);
// Load R back from the GPU
cudaMemcpy(R, Rd, tam, cudaMemcpyDeviceToHost);
// Free the GPU memory
cudaFree(Md);
cudaFree(Nd);
cudaFree(Rd);
}
int checkGpu() {
int count;
cudaError_t erro;
cudaProfilerStart();
erro = cudaGetDeviceCount(&count);
if (erro != cudaSuccess) {
printf("Erro: %s\n", cudaGetErrorString(erro));
return 0;
}
if (count < 1) {
printf("Erro: %s\n", "Este computador não possui um dispositivo com GPU compatível com CUDA disponível.");
return 0;
}
return 1;
}
void matriz_preencher(float* A, int tam) {
for (int i = 0; i < tam*tam; i++)
A[i] = (float)(rand() % 100);
}
void matriz_exibir(float* A, int tam) {
for (int i = 0; i < tam; i++) {
for (int j = 0; j < tam; j++) {
printf("%0.2f ", A[tam*i+j]);
}
printf("\n");
}
}
int main(int argc, const char * argv[]){
float A[TAM_BLOCO*TAM_BLOCO];
float B[TAM_BLOCO*TAM_BLOCO];
float C[TAM_BLOCO*TAM_BLOCO];
int aA = TAM_BLOCO;
int lA = TAM_BLOCO;
int lB = TAM_BLOCO;
if (!checkGpu())
exit(EXIT_FAILURE);
srand(time(NULL));
matriz_preencher(A, aA);
matriz_preencher(B, aA);
printf("Matriz A\n");
matriz_exibir(A, aA);
printf("Matriz B\n");
matriz_exibir(B, aA);
multiplicar(A, B, C, aA, lA, lB);
printf("RESULTADO\n");
matriz_exibir(C, aA);
cudaDeviceReset();
exit(EXIT_SUCCESS);
} |
0679807a1a211ae2e3f05a80085ae6cf6995e624.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Example showing the use of CUFFT for fast 1D-convolution using FFT.
* This sample is the same as simpleCUFFT, except that it uses a callback
* function to perform the pointwise multiply and scale, on input to the
* inverse transform.
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include <hipfftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
// This is the callback routine prototype
static __device__ hipfftComplex ComplexPointwiseMulAndScale(void * a, size_t index, void * cb_info, void *sharedmem);
typedef struct _cb_params{
Complex *filter;
float scale;
} cb_params;
// This is the callback routine. It does complex pointwise multiplication with scaling.
static __device__ hipfftComplex ComplexPointwiseMulAndScale(void *a, size_t index, void *cb_info, void *sharedmem)
{
cb_params * my_params = (cb_params *)cb_info;
return (hipfftComplex)ComplexScale(ComplexMul(((Complex *)a)[index],
(my_params->filter)[index]),
my_params->scale);
}
// Define the device pointer to the callback routine. The host code will fetch this and pass it to CUFFT
__device__ cufftCallbackLoadC myOwnCallbackPtr = ComplexPointwiseMulAndScale;
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int,
const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
struct hipDeviceProp_t properties;
int device;
checkCudaErrors(hipGetDevice(&device));
checkCudaErrors(hipGetDeviceProperties(&properties, device));
if( !(properties.major >= 2) ) {
printf("simpleCUFFT_callback requires CUDA architecture SM2.0 or higher\n");
return EXIT_WAIVED;
}
return runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUFFT callbacks
////////////////////////////////////////////////////////////////////////////////
int runTest(int argc, char **argv)
{
printf("[simpleCUFFT_callback] is starting...\n");
findCudaDevice(argc, (const char **)argv);
// Allocate host memory for the signal
Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Initialize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
{
h_signal[i].x = rand() / (float)RAND_MAX;
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
// Initialize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i)
{
h_filter_kernel[i].x = rand() / (float)RAND_MAX;
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex *h_padded_signal;
Complex *h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex *d_signal;
checkCudaErrors(hipMalloc((void **)&d_signal, mem_size));
// Copy host memory to device
checkCudaErrors(hipMemcpy(d_signal, h_padded_signal, mem_size,
hipMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex *d_filter_kernel;
checkCudaErrors(hipMalloc((void **)&d_filter_kernel, mem_size));
// Copy host memory to device
checkCudaErrors(hipMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
hipMemcpyHostToDevice));
// Create one CUFFT plan for the forward transforms, and one for the reverse transform
// with load callback.
hipfftHandle plan, cb_plan;
size_t work_size;
checkCudaErrors(hipfftCreate(&plan));
checkCudaErrors(hipfftCreate(&cb_plan));
checkCudaErrors(hipfftMakePlan1d(plan, new_size, HIPFFT_C2C, 1, &work_size));
checkCudaErrors(hipfftMakePlan1d(cb_plan, new_size, HIPFFT_C2C, 1, &work_size));
// Define a structure used to pass in the device address of the filter kernel, and
// the scale factor
cb_params h_params;
h_params.filter = d_filter_kernel;
h_params.scale = 1.0f / new_size;
// Allocate device memory for parameters
cb_params *d_params;
checkCudaErrors(hipMalloc((void **)&d_params, sizeof(cb_params)));
// Copy host memory to device
checkCudaErrors(hipMemcpy(d_params, &h_params, sizeof(cb_params),
hipMemcpyHostToDevice));
// The host needs to get a copy of the device pointer to the callback
cufftCallbackLoadC hostCopyOfCallbackPtr;
checkCudaErrors(hipMemcpyFromSymbol(&hostCopyOfCallbackPtr,
myOwnCallbackPtr,
sizeof(hostCopyOfCallbackPtr)));
// Now associate the load callback with the plan.
hipfftResult status = cufftXtSetCallback(cb_plan,
(void **)&hostCopyOfCallbackPtr,
CUFFT_CB_LD_COMPLEX,
(void **)&d_params);
if (status == HIPFFT_LICENSE_ERROR)
{
printf("This sample requires a valid license file.\n");
printf("The file was either not found, out of date, or otherwise invalid.\n");
return EXIT_WAIVED;
}
checkCudaErrors(cufftXtSetCallback(cb_plan,
(void **)&hostCopyOfCallbackPtr,
CUFFT_CB_LD_COMPLEX,
(void **)&d_params));
// Transform signal and kernel
printf("Transforming signal hipfftExecC2C\n");
checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD));
checkCudaErrors(hipfftExecC2C(plan, (hipfftComplex *)d_filter_kernel, (hipfftComplex *)d_filter_kernel, HIPFFT_FORWARD));
// Transform signal back, using the callback to do the pointwise multiply on the way in.
printf("Transforming signal back hipfftExecC2C\n");
checkCudaErrors(hipfftExecC2C(cb_plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_BACKWARD));
// Copy device memory to host
Complex *h_convolved_signal = h_padded_signal;
checkCudaErrors(hipMemcpy(h_convolved_signal, d_signal, mem_size,
hipMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE,
h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
//Destroy CUFFT context
checkCudaErrors(hipfftDestroy(plan));
checkCudaErrors(hipfftDestroy(cb_plan));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
checkCudaErrors(hipFree(d_signal));
checkCudaErrors(hipFree(d_filter_kernel));
checkCudaErrors(hipFree(d_params));
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
hipDeviceReset();
return bTestResult ? EXIT_SUCCESS : EXIT_FAILURE;
}
// Pad data
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
// Pad signal
Complex *new_data = (Complex *)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
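// The kernel is stored FFT-shifted for circular convolution: its second
// half goes at the start of the padded buffer and its first half wraps
// around to the end.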
new_data = (Complex *)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex *signal, int signal_size,
const Complex *filter_kernel, int filter_kernel_size,
Complex *filtered_signal)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i)
{
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = - maxRadius + 1; j <= minRadius; ++j)
{
int k = i + j;
if (k >= 0 && k < signal_size)
{
filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
| 0679807a1a211ae2e3f05a80085ae6cf6995e624.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/*
* Example showing the use of CUFFT for fast 1D-convolution using FFT.
* This sample is the same as simpleCUFFT, except that it uses a callback
* function to perform the pointwise multiply and scale, on input to the
* inverse transform.
*
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <helper_functions.h>
#include <helper_cuda.h>
// Complex data type
typedef float2 Complex;
static __device__ __host__ inline Complex ComplexAdd(Complex, Complex);
static __device__ __host__ inline Complex ComplexScale(Complex, float);
static __device__ __host__ inline Complex ComplexMul(Complex, Complex);
// This is the callback routine prototype
static __device__ cufftComplex ComplexPointwiseMulAndScale(void * a, size_t index, void * cb_info, void *sharedmem);
typedef struct _cb_params{
Complex *filter;
float scale;
} cb_params;
// This is the callback routine. It does complex pointwise multiplication with scaling.
static __device__ cufftComplex ComplexPointwiseMulAndScale(void *a, size_t index, void *cb_info, void *sharedmem)
{
cb_params * my_params = (cb_params *)cb_info;
return (cufftComplex)ComplexScale(ComplexMul(((Complex *)a)[index],
(my_params->filter)[index]),
my_params->scale);
}
// Define the device pointer to the callback routine. The host code will fetch this and pass it to CUFFT
__device__ cufftCallbackLoadC myOwnCallbackPtr = ComplexPointwiseMulAndScale;
// Filtering functions
void Convolve(const Complex *, int, const Complex *, int, Complex *);
// Padding functions
int PadData(const Complex *, Complex **, int,
const Complex *, Complex **, int);
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char **argv);
// The filter size is assumed to be a number smaller than the signal size
#define SIGNAL_SIZE 50
#define FILTER_KERNEL_SIZE 11
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
struct cudaDeviceProp properties;
int device;
checkCudaErrors(cudaGetDevice(&device));
checkCudaErrors(cudaGetDeviceProperties(&properties, device));
if( !(properties.major >= 2) ) {
printf("simpleCUFFT_callback requires CUDA architecture SM2.0 or higher\n");
return EXIT_WAIVED;
}
return runTest(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUFFT callbacks
////////////////////////////////////////////////////////////////////////////////
int runTest(int argc, char **argv)
{
printf("[simpleCUFFT_callback] is starting...\n");
findCudaDevice(argc, (const char **)argv);
// Allocate host memory for the signal
Complex *h_signal = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Initialize the memory for the signal
for (unsigned int i = 0; i < SIGNAL_SIZE; ++i)
{
h_signal[i].x = rand() / (float)RAND_MAX;
h_signal[i].y = 0;
}
// Allocate host memory for the filter
Complex *h_filter_kernel = (Complex *)malloc(sizeof(Complex) * FILTER_KERNEL_SIZE);
// Initialize the memory for the filter
for (unsigned int i = 0; i < FILTER_KERNEL_SIZE; ++i)
{
h_filter_kernel[i].x = rand() / (float)RAND_MAX;
h_filter_kernel[i].y = 0;
}
// Pad signal and filter kernel
Complex *h_padded_signal;
Complex *h_padded_filter_kernel;
int new_size = PadData(h_signal, &h_padded_signal, SIGNAL_SIZE,
h_filter_kernel, &h_padded_filter_kernel, FILTER_KERNEL_SIZE);
int mem_size = sizeof(Complex) * new_size;
// Allocate device memory for signal
Complex *d_signal;
checkCudaErrors(cudaMalloc((void **)&d_signal, mem_size));
// Copy host memory to device
checkCudaErrors(cudaMemcpy(d_signal, h_padded_signal, mem_size,
cudaMemcpyHostToDevice));
// Allocate device memory for filter kernel
Complex *d_filter_kernel;
checkCudaErrors(cudaMalloc((void **)&d_filter_kernel, mem_size));
// Copy host memory to device
checkCudaErrors(cudaMemcpy(d_filter_kernel, h_padded_filter_kernel, mem_size,
cudaMemcpyHostToDevice));
// Create one CUFFT plan for the forward transforms, and one for the reverse transform
// with load callback.
cufftHandle plan, cb_plan;
size_t work_size;
checkCudaErrors(cufftCreate(&plan));
checkCudaErrors(cufftCreate(&cb_plan));
checkCudaErrors(cufftMakePlan1d(plan, new_size, CUFFT_C2C, 1, &work_size));
checkCudaErrors(cufftMakePlan1d(cb_plan, new_size, CUFFT_C2C, 1, &work_size));
// Define a structure used to pass in the device address of the filter kernel, and
// the scale factor
cb_params h_params;
h_params.filter = d_filter_kernel;
h_params.scale = 1.0f / new_size;
// Allocate device memory for parameters
cb_params *d_params;
checkCudaErrors(cudaMalloc((void **)&d_params, sizeof(cb_params)));
// Copy host memory to device
checkCudaErrors(cudaMemcpy(d_params, &h_params, sizeof(cb_params),
cudaMemcpyHostToDevice));
// The host needs to get a copy of the device pointer to the callback
cufftCallbackLoadC hostCopyOfCallbackPtr;
checkCudaErrors(cudaMemcpyFromSymbol(&hostCopyOfCallbackPtr,
myOwnCallbackPtr,
sizeof(hostCopyOfCallbackPtr)));
// Now associate the load callback with the plan.
cufftResult status = cufftXtSetCallback(cb_plan,
(void **)&hostCopyOfCallbackPtr,
CUFFT_CB_LD_COMPLEX,
(void **)&d_params);
if (status == CUFFT_LICENSE_ERROR)
{
printf("This sample requires a valid license file.\n");
printf("The file was either not found, out of date, or otherwise invalid.\n");
return EXIT_WAIVED;
}
checkCudaErrors(cufftXtSetCallback(cb_plan,
(void **)&hostCopyOfCallbackPtr,
CUFFT_CB_LD_COMPLEX,
(void **)&d_params));
// Transform signal and kernel
printf("Transforming signal cufftExecC2C\n");
checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD));
checkCudaErrors(cufftExecC2C(plan, (cufftComplex *)d_filter_kernel, (cufftComplex *)d_filter_kernel, CUFFT_FORWARD));
// Transform signal back, using the callback to do the pointwise multiply on the way in.
printf("Transforming signal back cufftExecC2C\n");
checkCudaErrors(cufftExecC2C(cb_plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_INVERSE));
// Copy device memory to host
Complex *h_convolved_signal = h_padded_signal;
checkCudaErrors(cudaMemcpy(h_convolved_signal, d_signal, mem_size,
cudaMemcpyDeviceToHost));
// Allocate host memory for the convolution result
Complex *h_convolved_signal_ref = (Complex *)malloc(sizeof(Complex) * SIGNAL_SIZE);
// Convolve on the host
Convolve(h_signal, SIGNAL_SIZE,
h_filter_kernel, FILTER_KERNEL_SIZE,
h_convolved_signal_ref);
// check result
bool bTestResult = sdkCompareL2fe((float *)h_convolved_signal_ref, (float *)h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f);
//Destroy CUFFT context
checkCudaErrors(cufftDestroy(plan));
checkCudaErrors(cufftDestroy(cb_plan));
// cleanup memory
free(h_signal);
free(h_filter_kernel);
free(h_padded_signal);
free(h_padded_filter_kernel);
free(h_convolved_signal_ref);
checkCudaErrors(cudaFree(d_signal));
checkCudaErrors(cudaFree(d_filter_kernel));
checkCudaErrors(cudaFree(d_params));
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
cudaDeviceReset();
return bTestResult ? EXIT_SUCCESS : EXIT_FAILURE;
}
// Pad data
int PadData(const Complex *signal, Complex **padded_signal, int signal_size,
const Complex *filter_kernel, Complex **padded_filter_kernel, int filter_kernel_size)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
int new_size = signal_size + maxRadius;
// Pad signal
Complex *new_data = (Complex *)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, signal, signal_size * sizeof(Complex));
memset(new_data + signal_size, 0, (new_size - signal_size) * sizeof(Complex));
*padded_signal = new_data;
// Pad filter
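// The kernel is stored FFT-shifted for circular convolution: its second
// half goes at the start of the padded buffer and its first half wraps
// around to the end.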
new_data = (Complex *)malloc(sizeof(Complex) * new_size);
memcpy(new_data + 0, filter_kernel + minRadius, maxRadius * sizeof(Complex));
memset(new_data + maxRadius, 0, (new_size - filter_kernel_size) * sizeof(Complex));
memcpy(new_data + new_size - minRadius, filter_kernel, minRadius * sizeof(Complex));
*padded_filter_kernel = new_data;
return new_size;
}
////////////////////////////////////////////////////////////////////////////////
// Filtering operations
////////////////////////////////////////////////////////////////////////////////
// Computes convolution on the host
void Convolve(const Complex *signal, int signal_size,
const Complex *filter_kernel, int filter_kernel_size,
Complex *filtered_signal)
{
int minRadius = filter_kernel_size / 2;
int maxRadius = filter_kernel_size - minRadius;
// Loop over output element indices
for (int i = 0; i < signal_size; ++i)
{
filtered_signal[i].x = filtered_signal[i].y = 0;
// Loop over convolution indices
for (int j = - maxRadius + 1; j <= minRadius; ++j)
{
int k = i + j;
if (k >= 0 && k < signal_size)
{
filtered_signal[i] = ComplexAdd(filtered_signal[i], ComplexMul(signal[k], filter_kernel[minRadius - j]));
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Complex operations
////////////////////////////////////////////////////////////////////////////////
// Complex addition
static __device__ __host__ inline Complex ComplexAdd(Complex a, Complex b)
{
Complex c;
c.x = a.x + b.x;
c.y = a.y + b.y;
return c;
}
// Complex scale
static __device__ __host__ inline Complex ComplexScale(Complex a, float s)
{
Complex c;
c.x = s * a.x;
c.y = s * a.y;
return c;
}
// Complex multiplication
static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b)
{
Complex c;
c.x = a.x * b.x - a.y * b.y;
c.y = a.x * b.y + a.y * b.x;
return c;
}
|
601c462c86bdecb59823b6b8b95eded8b2bd8d07.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void fillImage(int width, int height, int value, int* devOutput) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int index = y * width + x;
if ((y < height) && (x < width)) {
devOutput[index] = value;
}
} | 601c462c86bdecb59823b6b8b95eded8b2bd8d07.cu | #include "includes.h"
__global__ void fillImage(int width, int height, int value, int* devOutput) {
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
int index = y * width + x;
if ((y < height) && (x < width)) {
devOutput[index] = value;
}
} |
5e0df14267f41bdb550622982ac5359030c58807.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "rbm_cuda.cuh"
__global__
void trainKernel(int* train_vec_in_batch, int* movies_in_batch, int* ratings_in_batch,
float* Vzeros, float* Vts, float* Hzeros, float* Hts,
float* W, float* BV, float* BH, float* W_inc, float* BV_inc, float* BH_inc,
int batch_size, int num_movies_in_this_batch, const int i_batch_start,
const bool update_weights) {
unsigned int user = blockIdx.x * blockDim.x + threadIdx.x;
while (user < batch_size) {
int start = train_vec_in_batch[2 * user];
int end = train_vec_in_batch[2 * user + 1];
int size = end - start;
start -= i_batch_start;
if (size != 0) {
float* V0 = Vzeros + K * start;
float* Vt = Vts + K * start;
float* H0 = Hzeros + user * F;
float* Ht = Hts + user * F;
int* u_movies = movies_in_batch + start;
int* u_ratings = ratings_in_batch + start;
// initialize V0
for (int i = 0; i < size; i++) {
V0[i * K + u_ratings[i] - 1] = 1;
}
//////////////// positive phase ////////////////
for (int i = 0; i < size; i++) {
float* W_user = W + u_movies[i] * (K * F);
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
H0[j] += W_user[j * K + k] * V0[i * K + k];
}
}
}
// add bias and logistic function on H0
for (int j = 0; j < F; j++) {
H0[j] += BH[j];
H0[j] = 1.0 / (1 + exp(-H0[j]));
}
if (update_weights) {
//////////////// negative phase ////////////////
for (int i = 0; i < size; i++) {
float* W_user = W + u_movies[i] * (K * F);
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
Vt[i * K + k] += H0[j] * W_user[j * K + k];
}
}
// normalize Vt
float sum_k = 0.0;
for (int k = 0; k < K; k++) {
Vt[i * K + k] += BV[u_movies[i] * K + k]; // add bias
Vt[i * K + k] = exp(Vt[i * K + k]); // exponential
sum_k += Vt[i * K + k];
}
for (int k = 0; k < K; k++) {
Vt[i * K + k] /= sum_k;
}
}
// compute Ht
for (int i = 0; i < size; i++) {
float* W_user = W + u_movies[i] * (K * F);
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
Ht[j] += W_user[j * K + k] * Vt[i * K + k];
}
}
}
// add bias and logistic function on Ht
for (int j = 0; j < F; j++) {
Ht[j] += BH[j]; // hidden-unit bias
Ht[j] = 1.0 / (1 + exp(-Ht[j]));
}
//////////////// update weight increments ////////////////
// update BV_inc
for (int i = 0; i < size; i++) {
for (int k = 0; k < K; k++) {
BV_inc[u_movies[i] * K + k] += (V0[i * K + k] - Vt[i * K + k]);
}
}
// update W_inc
for (int i = 0; i < size; i++) {
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
W_inc[u_movies[i] * K * F + j * K + k] += (H0[j] * V0[i * K + k] - Ht[j] * Vt[i * K + k]);
}
}
}
// update BH_inc
for (int j = 0; j < F; j++) {
BH_inc[user * F + j] = (H0[j] - Ht[j]);
}
} // end update weights
}
user += blockDim.x * gridDim.x;
}
}
__global__
void updateW_kernel(float* W, float* W_inc, const unsigned int M, const float lrate) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < K * F * M) {
W[i] += lrate * W_inc[i];
i += blockDim.x * gridDim.x;
}
}
__global__
void updateBV_kernel(float* BV, float* BV_inc, const unsigned int M, const float lrate) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < K * M) {
BV[i] += lrate * BV_inc[i];
i += blockDim.x * gridDim.x;
}
}
__global__
void updateBH_kernel(float* BH, float* BH_inc, const float lrate_BH, const int batch_size) {
extern __shared__ float sBH_inc[];
unsigned int tid = threadIdx.x;
sBH_inc[tid] = 0;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < batch_size) {
sBH_inc[tid] += BH_inc[i * F];
i += blockDim.x * gridDim.x;
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
sBH_inc[tid] += sBH_inc[tid + s];
}
__syncthreads();
}
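// Final warp: fold the remaining 64 partial sums. atomicAdd on shared
// memory stands in for the classic volatile-pointer trick, keeping the
// unrolled, barrier-free warp reduction well-defined.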
if (tid < 32) {
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 32]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 16]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 8]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 4]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 2]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 1]);
}
if (tid == 0)
atomicAdd(BH, lrate_BH * sBH_inc[0]);
}
__global__
void predictKernel(int* test_vec_in_batch, int* test_movies_in_batch, int* test_ratings_in_batch,
float* Hzeros, float* Vts,
float* W, float* BV,
int batch_size, const int num_test_movies_in_this_batch, const int i_test_batch_start,
float* results_in_batch) {
unsigned int user = blockIdx.x * blockDim.x + threadIdx.x;
while (user < batch_size) {
int start = test_vec_in_batch[2 * user];
int end = test_vec_in_batch[2 * user + 1];
int size = end - start;
start -= i_test_batch_start;
if (size != 0) {
float* H0 = Hzeros + user * F;
float* Vt = Vts + K * start;
int* u_movies = test_movies_in_batch + start;
//////////////// negative phase ////////////////
for (int i = 0; i < size; i++) {
float* W_user = W + u_movies[i] * (K * F);
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
Vt[i * K + k] += H0[j] * W_user[j * K + k];
}
}
// normalize Vt
float sum_k = 0.0;
for (int k = 0; k < K; k++) {
Vt[i * K + k] += BV[u_movies[i] * K + k]; // add bias
Vt[i * K + k] = exp(Vt[i * K + k]); // exponential
sum_k += Vt[i * K + k];
}
for (int k = 0; k < K; k++) {
Vt[i * K + k] /= sum_k;
}
// update results
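// The predicted rating is the expectation under the softmax distribution:
// the sum over k of (k + 1) * P(rating == k + 1).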
float score = 0;
for (int k = 0; k < K; k++) {
score += (k + 1) * Vt[i * K + k];
}
results_in_batch[start + i] = score;
}
}
user += blockDim.x * gridDim.x;
}
}
void train(int* train_vec_in_batch, int* movies_in_batch, int* ratings_in_batch,
int* test_vec_in_batch, int* test_movies_in_batch, int* test_ratings_in_batch,
float* Vzeros, float* Vts, float* Hzeros, float* Hts,
float* W, float* BV, float* BH, float* W_inc, float* BV_inc, float* BH_inc,
int batch_size, int num_movies_in_this_batch, const int i_batch_start,
const int num_test_movies_in_this_batch, const int i_test_batch_start,
const unsigned int M, const float lrate, const float lrate_BH,
float* results_in_batch, const bool update_weights,
int blocks, int threadsPerBlock) {
if (update_weights) {
hipLaunchKernelGGL(( trainKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
train_vec_in_batch, movies_in_batch, ratings_in_batch,
Vzeros, Vts, Hzeros, Hts,
W, BV, BH,
W_inc, BV_inc, BH_inc,
batch_size, num_movies_in_this_batch, i_batch_start,
true);
unsigned int Wblocks = min(blocks, (int)ceil(K * F * M / (float)threadsPerBlock));
hipLaunchKernelGGL(( updateW_kernel), dim3(Wblocks), dim3(threadsPerBlock), 0, 0, W, W_inc, M, lrate);
unsigned int BVblocks = min(blocks, (int)ceil(K * M / (float)threadsPerBlock));
hipLaunchKernelGGL(( updateBV_kernel), dim3(BVblocks), dim3(threadsPerBlock), 0, 0, BV, BV_inc, M, lrate);
unsigned int BHblocks = min(blocks, (int)ceil(batch_size / (float)threadsPerBlock));
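// One reduction launch per hidden unit j: BH_inc is laid out user-major as
// [user * F + j], so the base pointer BH_inc + j makes the kernel's
// stride-F reads walk column j, and BH + j receives that unit's update.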
for (int j = 0; j < F; j++) {
hipLaunchKernelGGL(( updateBH_kernel), dim3(BHblocks), dim3(threadsPerBlock), threadsPerBlock * sizeof(float), 0,
BH + j, BH_inc + j, lrate_BH, batch_size);
}
}
// in prediction stage
else {
hipLaunchKernelGGL(( trainKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
train_vec_in_batch, movies_in_batch, ratings_in_batch,
Vzeros, Vts, Hzeros, Hts,
W, BV, BH,
W_inc, BV_inc, BH_inc,
batch_size, num_movies_in_this_batch, i_batch_start,
false);
// TODO: update Vt, and compute results
hipLaunchKernelGGL(( predictKernel), dim3(blocks), dim3(threadsPerBlock), 0, 0,
test_vec_in_batch, test_movies_in_batch, test_ratings_in_batch,
Hzeros, Vts,
W, BV,
batch_size, num_test_movies_in_this_batch, i_test_batch_start,
results_in_batch);
}
}
| 5e0df14267f41bdb550622982ac5359030c58807.cu | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cuda.h>
#include "rbm_cuda.cuh"
__global__
void trainKernel(int* train_vec_in_batch, int* movies_in_batch, int* ratings_in_batch,
float* Vzeros, float* Vts, float* Hzeros, float* Hts,
float* W, float* BV, float* BH, float* W_inc, float* BV_inc, float* BH_inc,
int batch_size, int num_movies_in_this_batch, const int i_batch_start,
const bool update_weights) {
unsigned int user = blockIdx.x * blockDim.x + threadIdx.x;
while (user < batch_size) {
int start = train_vec_in_batch[2 * user];
int end = train_vec_in_batch[2 * user + 1];
int size = end - start;
start -= i_batch_start;
if (size != 0) {
float* V0 = Vzeros + K * start;
float* Vt = Vts + K * start;
float* H0 = Hzeros + user * F;
float* Ht = Hts + user * F;
int* u_movies = movies_in_batch + start;
int* u_ratings = ratings_in_batch + start;
// initialize V0
for (int i = 0; i < size; i++) {
V0[i * K + u_ratings[i] - 1] = 1;
}
//////////////// positive phase ////////////////
for (int i = 0; i < size; i++) {
float* W_user = W + u_movies[i] * (K * F);
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
H0[j] += W_user[j * K + k] * V0[i * K + k];
}
}
}
// add bias and logistic function on H0
for (int j = 0; j < F; j++) {
H0[j] += BH[j];
H0[j] = 1.0 / (1 + exp(-H0[j]));
}
if (update_weights) {
//////////////// negative phase ////////////////
for (int i = 0; i < size; i++) {
float* W_user = W + u_movies[i] * (K * F);
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
Vt[i * K + k] += H0[j] * W_user[j * K + k];
}
}
// normalize Vt
float sum_k = 0.0;
for (int k = 0; k < K; k++) {
Vt[i * K + k] += BV[u_movies[i] * K + k]; // add bias
Vt[i * K + k] = exp(Vt[i * K + k]); // exponential
sum_k += Vt[i * K + k];
}
for (int k = 0; k < K; k++) {
Vt[i * K + k] /= sum_k;
}
}
// compute Ht
for (int i = 0; i < size; i++) {
float* W_user = W + u_movies[i] * (K * F);
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
Ht[j] += W_user[j * K + k] * Vt[i * K + k];
}
}
}
// add bias and logistic function on Ht
for (int j = 0; j < F; j++) {
Ht[j] += BH[j]; // hidden-unit bias
Ht[j] = 1.0 / (1 + exp(-Ht[j]));
}
//////////////// update weight increments ////////////////
// update BV_inc
for (int i = 0; i < size; i++) {
for (int k = 0; k < K; k++) {
BV_inc[u_movies[i] * K + k] += (V0[i * K + k] - Vt[i * K + k]);
}
}
// update W_inc
for (int i = 0; i < size; i++) {
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
W_inc[u_movies[i] * K * F + j * K + k] += (H0[j] * V0[i * K + k] - Ht[j] * Vt[i * K + k]);
}
}
}
// update BH_inc
for (int j = 0; j < F; j++) {
BH_inc[user * F + j] = (H0[j] - Ht[j]);
}
} // end update weights
}
user += blockDim.x * gridDim.x;
}
}
__global__
void updateW_kernel(float* W, float* W_inc, const unsigned int M, const float lrate) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < K * F * M) {
W[i] += lrate * W_inc[i];
i += blockDim.x * gridDim.x;
}
}
__global__
void updateBV_kernel(float* BV, float* BV_inc, const unsigned int M, const float lrate) {
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < K * M) {
BV[i] += lrate * BV_inc[i];
i += blockDim.x * gridDim.x;
}
}
__global__
void updateBH_kernel(float* BH, float* BH_inc, const float lrate_BH, const int batch_size) {
extern __shared__ float sBH_inc[];
unsigned int tid = threadIdx.x;
sBH_inc[tid] = 0;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
while (i < batch_size) {
sBH_inc[tid] += BH_inc[i * F];
i += blockDim.x * gridDim.x;
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s) {
sBH_inc[tid] += sBH_inc[tid + s];
}
__syncthreads();
}
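// Final warp: fold the remaining 64 partial sums. atomicAdd on shared
// memory stands in for the classic volatile-pointer trick, keeping the
// unrolled, barrier-free warp reduction well-defined.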
if (tid < 32) {
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 32]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 16]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 8]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 4]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 2]);
atomicAdd(&sBH_inc[tid], sBH_inc[tid + 1]);
}
if (tid == 0)
atomicAdd(BH, lrate_BH * sBH_inc[0]);
}
__global__
void predictKernel(int* test_vec_in_batch, int* test_movies_in_batch, int* test_ratings_in_batch,
float* Hzeros, float* Vts,
float* W, float* BV,
int batch_size, const int num_test_movies_in_this_batch, const int i_test_batch_start,
float* results_in_batch) {
unsigned int user = blockIdx.x * blockDim.x + threadIdx.x;
while (user < batch_size) {
int start = test_vec_in_batch[2 * user];
int end = test_vec_in_batch[2 * user + 1];
int size = end - start;
start -= i_test_batch_start;
if (size != 0) {
float* H0 = Hzeros + user * F;
float* Vt = Vts + K * start;
int* u_movies = test_movies_in_batch + start;
//////////////// negative phase ////////////////
for (int i = 0; i < size; i++) {
float* W_user = W + u_movies[i] * (K * F);
for (int j = 0; j < F; j++) {
for (int k = 0; k < K; k++) {
Vt[i * K + k] += H0[j] * W_user[j * K + k];
}
}
// normalize Vt
float sum_k = 0.0;
for (int k = 0; k < K; k++) {
Vt[i * K + k] += BV[u_movies[i] * K + k]; // add bias
Vt[i * K + k] = exp(Vt[i * K + k]); // exponential
sum_k += Vt[i * K + k];
}
for (int k = 0; k < K; k++) {
Vt[i * K + k] /= sum_k;
}
// update results
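// The predicted rating is the expectation under the softmax distribution:
// the sum over k of (k + 1) * P(rating == k + 1).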
float score = 0;
for (int k = 0; k < K; k++) {
score += (k + 1) * Vt[i * K + k];
}
results_in_batch[start + i] = score;
}
}
user += blockDim.x * gridDim.x;
}
}
void train(int* train_vec_in_batch, int* movies_in_batch, int* ratings_in_batch,
int* test_vec_in_batch, int* test_movies_in_batch, int* test_ratings_in_batch,
float* Vzeros, float* Vts, float* Hzeros, float* Hts,
float* W, float* BV, float* BH, float* W_inc, float* BV_inc, float* BH_inc,
int batch_size, int num_movies_in_this_batch, const int i_batch_start,
const int num_test_movies_in_this_batch, const int i_test_batch_start,
const unsigned int M, const float lrate, const float lrate_BH,
float* results_in_batch, const bool update_weights,
int blocks, int threadsPerBlock) {
if (update_weights) {
trainKernel<<<blocks, threadsPerBlock>>>
(train_vec_in_batch, movies_in_batch, ratings_in_batch,
Vzeros, Vts, Hzeros, Hts,
W, BV, BH,
W_inc, BV_inc, BH_inc,
batch_size, num_movies_in_this_batch, i_batch_start,
true);
unsigned int Wblocks = min(blocks, (int)ceil(K * F * M / (float)threadsPerBlock));
updateW_kernel<<<Wblocks, threadsPerBlock>>>(W, W_inc, M, lrate);
unsigned int BVblocks = min(blocks, (int)ceil(K * M / (float)threadsPerBlock));
updateBV_kernel<<<BVblocks, threadsPerBlock>>>(BV, BV_inc, M, lrate);
unsigned int BHblocks = min(blocks, (int)ceil(batch_size / (float)threadsPerBlock));
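// One reduction launch per hidden unit j: BH_inc is laid out user-major as
// [user * F + j], so the base pointer BH_inc + j makes the kernel's
// stride-F reads walk column j, and BH + j receives that unit's update.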
for (int j = 0; j < F; j++) {
updateBH_kernel<<<BHblocks, threadsPerBlock, threadsPerBlock * sizeof(float)>>>
(BH + j, BH_inc + j, lrate_BH, batch_size);
}
}
// in prediction stage
else {
trainKernel<<<blocks, threadsPerBlock>>>
(train_vec_in_batch, movies_in_batch, ratings_in_batch,
Vzeros, Vts, Hzeros, Hts,
W, BV, BH,
W_inc, BV_inc, BH_inc,
batch_size, num_movies_in_this_batch, i_batch_start,
false);
// TODO: update Vt, and compute results
predictKernel<<<blocks, threadsPerBlock>>>
(test_vec_in_batch, test_movies_in_batch, test_ratings_in_batch,
Hzeros, Vts,
W, BV,
batch_size, num_test_movies_in_this_batch, i_test_batch_start,
results_in_batch);
}
}
|
92b17289425e619a0740a83a6e7227623e38b27b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ static void solveEnd ( double* data, const double a, const double b, const double d, const double e, const double omega_11, const double omega_12, const double omega_21, const double omega_22, const int nx, const int nBatch )
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
// Last two vectors
double newNx2;
double newNx1;
// Compute lambda = d^~ - transpose(g) * inverse(E) * d_hat
newNx2 = data[(nx - 2) * nBatch + globalIdx] - (e * data[globalIdx] + a * data[(nx - 4) * nBatch + globalIdx] + b * data[(nx - 3) * nBatch + globalIdx]);
newNx1 = data[(nx - 1) * nBatch + globalIdx] - (d * data[globalIdx] + e * data[nBatch + globalIdx] + a * data[(nx - 3) * nBatch + globalIdx]);
// Compute x^~ = omega * lambda
data[(nx - 2) * nBatch + globalIdx] = omega_11 * newNx2 + omega_12 * newNx1;
data[(nx - 1) * nBatch + globalIdx] = omega_21 * newNx2 + omega_22 * newNx1;
} | 92b17289425e619a0740a83a6e7227623e38b27b.cu | #include "includes.h"
__global__ static void solveEnd ( double* data, const double a, const double b, const double d, const double e, const double omega_11, const double omega_12, const double omega_21, const double omega_22, const int nx, const int nBatch )
{
// Matrix index
int globalIdx = blockDim.x * blockIdx.x + threadIdx.x;
// Last two vectors
double newNx2;
double newNx1;
// Compute lambda = d^~ - transpose(g) * inverse(E) * d_hat
newNx2 = data[(nx - 2) * nBatch + globalIdx] - (e * data[globalIdx] + a * data[(nx - 4) * nBatch + globalIdx] + b * data[(nx - 3) * nBatch + globalIdx]);
newNx1 = data[(nx - 1) * nBatch + globalIdx] - (d * data[globalIdx] + e * data[nBatch + globalIdx] + a * data[(nx - 3) * nBatch + globalIdx]);
// Compute x^~ = omega * lambda
data[(nx - 2) * nBatch + globalIdx] = omega_11 * newNx2 + omega_12 * newNx1;
data[(nx - 1) * nBatch + globalIdx] = omega_21 * newNx2 + omega_22 * newNx1;
} |
41c5a19c5208b9617d9510a4727a129e0a420d3b.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @internal
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* [email protected]
* @date August, 2017
* @version v2
*
 * @copyright Copyright © 2017 cuStinger. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*
* @file
*/
#include "Dynamic/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
namespace hornets_nest {
KatzCentralityDynamic::KatzCentralityDynamic(HornetGraph& hornet,
HornetGraph& inverted_graph,
int max_iteration, int K,
degree_t max_degree) :
StaticAlgorithm(hornet),
load_balacing(hornet),
inverted_graph(inverted_graph),
is_directed(false),
kc_static(hornet, max_iteration, K,
max_degree, false) {
hd_katzdata().active_queue.initialize(hornet);
gpu::allocate(hd_katzdata().new_paths_curr, hornet.nV());
gpu::allocate(hd_katzdata().new_paths_prev, hornet.nV());
gpu::allocate(hd_katzdata().active, hornet.nV());
hd_katzdata = kc_static.katz_data();
std::cout << "Oded remember to take care of memory de-allocation\n"
<< "Oded need to figure out correct API for dynamic graph"
<< "algorithms\n"
<< "Dynamic katz centrality algorithm needs to get both the"
<< "original graph and the inverted graph for directed graphs"
<< std::endl;
}
KatzCentralityDynamic::KatzCentralityDynamic(HornetGraph& hornet,
int max_iteration, int K,
degree_t max_degree) :
StaticAlgorithm(hornet),
load_balacing(hornet),
inverted_graph(inverted_graph),
is_directed(true),
kc_static(inverted_graph, max_iteration, K,
max_degree, true) {
hd_katzdata().active_queue.initialize(hornet);
gpu::allocate(hd_katzdata().new_paths_curr, hornet.nV());
gpu::allocate(hd_katzdata().new_paths_prev, hornet.nV());
gpu::allocate(hd_katzdata().active, hornet.nV());
hd_katzdata = kc_static.katz_data();
std::cout << "Oded remember to take care of memory de-allocation\n"
<< "Oded need to figure out correct API for dynamic graph"
<< "algorithms\n"
<< "Dynamic katz centrality algorithm needs to get both the"
<< "original graph and the inverted graph for directed graphs"
<< std::endl;
}
KatzCentralityDynamic::~KatzCentralityDynamic() {
release();
}
void KatzCentralityDynamic::run_static() {
// Executing the static graph algorithm
kc_static.reset();
kc_static.run();
hd_katzdata().iteration_static = hd_katzdata().iteration;
// Initializing the fields of the dynamic graph algorithm
forAllnumV(hornet, InitStreaming { hd_katzdata } );
}
void KatzCentralityDynamic::release(){
gpu::free(hd_katzdata().new_paths_curr);
gpu::free(hd_katzdata().new_paths_prev);
gpu::free(hd_katzdata().active);
}
//==============================================================================
void KatzCentralityDynamic::processUpdate(BatchUpdate& batch_update,
bool is_insert) {
// Resetting the queue of the active vertices.
hd_katzdata().active_queue.clear();
hd_katzdata().iteration = 1;
// Initialization of insertions or deletions is slightly different.
if (is_insert)
forAllEdges(hornet, batch_update, SetupInsertions { hd_katzdata });
else
forAllEdges(hornet, batch_update, SetupDeletions { hd_katzdata } );
hd_katzdata.sync();
hd_katzdata().iteration = 2;
while (hd_katzdata().iteration < hd_katzdata().max_iteration &&
hd_katzdata().iteration < hd_katzdata().iteration_static) {
hd_katzdata().alphaI = ::pow(hd_katzdata().alpha,
hd_katzdata().iteration);
forAll(hd_katzdata().active_queue, //hd_katzdata().num_active
InitActiveNewPaths { hd_katzdata });
// Undirected graphs and directed graphs need to be dealt with differently.
if (!is_directed) {
forAllEdges(hornet, hd_katzdata().active_queue,
FindNextActive { hd_katzdata }, load_balacing);
hd_katzdata.sync(); // Syncing queue info
forAllEdges(hornet, hd_katzdata().active_queue,
UpdateActiveNewPaths { hd_katzdata },
load_balacing );
}
else {
forAllEdges(inverted_graph, hd_katzdata().active_queue,
FindNextActive { hd_katzdata }, load_balacing);
hd_katzdata.sync();
forAllEdges(inverted_graph, hd_katzdata().active_queue,
UpdateActiveNewPaths { hd_katzdata }, load_balacing);
}
hd_katzdata.sync(); // Syncing queue info
// Checking if we are dealing with a batch of insertions or deletions.
if (is_insert) {
forAllEdges(hornet, batch_update,
UpdateNewPathsBatchInsert { hd_katzdata });
}
else {
forAllEdges(hornet, batch_update,
UpdateNewPathsBatchDelete { hd_katzdata });
}
hd_katzdata.sync();
forAll(hd_katzdata().active_queue, UpdatePrevWithCurr { hd_katzdata });
hd_katzdata.sync();
hd_katzdata().iteration++;
}
if (hd_katzdata().iteration > 2) {
forAll(hd_katzdata().active_queue, //hd_katzdata().num_active?
UpdateLastIteration { hd_katzdata } );
hd_katzdata.sync();
}
// Resetting the fields of the dynamic graph algorithm for all the vertices
// that were active
//hd_katzdata().num_active ??
forAll(hd_katzdata().active_queue, InitStreaming {hd_katzdata});
}
//------------------------------------------------------------------------------
int KatzCentralityDynamic::get_iteration_count(){
return hd_katzdata().iteration;
}
void KatzCentralityDynamic::batchUpdateInserted(BatchUpdate &batch_update) {
processUpdate(batch_update, true);
}
void KatzCentralityDynamic::batchUpdateDeleted(BatchUpdate &batch_update) {
processUpdate(batch_update, false);
}
void KatzCentralityDynamic::copyKCToHost(double* host_array) {
kc_static.copyKCToHost(host_array);
}
void KatzCentralityDynamic::copyNumPathsToHost(ulong_t* host_array) {
kc_static.copyNumPathsToHost(host_array);
}
}// cuStingerAlgs namespace
| 41c5a19c5208b9617d9510a4727a129e0a420d3b.cu | /**
* @internal
* @author Oded Green <br>
* Georgia Institute of Technology, Computational Science and Engineering <br>
* [email protected]
* @date August, 2017
* @version v2
*
* @copyright Copyright © 2017 cuStinger. All rights reserved.
*
* @license{<blockquote>
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* </blockquote>}
*
* @file
*/
#include "Dynamic/KatzCentrality/Katz.cuh"
#include "KatzOperators.cuh"
namespace hornets_nest {
KatzCentralityDynamic::KatzCentralityDynamic(HornetGraph& hornet,
HornetGraph& inverted_graph,
int max_iteration, int K,
degree_t max_degree) :
StaticAlgorithm(hornet),
load_balacing(hornet),
inverted_graph(inverted_graph),
is_directed(false),
kc_static(hornet, max_iteration, K,
max_degree, false) {
hd_katzdata().active_queue.initialize(hornet);
gpu::allocate(hd_katzdata().new_paths_curr, hornet.nV());
gpu::allocate(hd_katzdata().new_paths_prev, hornet.nV());
gpu::allocate(hd_katzdata().active, hornet.nV());
hd_katzdata = kc_static.katz_data();
std::cout << "Oded remember to take care of memory de-allocation\n"
<< "Oded need to figure out correct API for dynamic graph"
<< "algorithms\n"
<< "Dynamic katz centrality algorithm needs to get both the"
<< "original graph and the inverted graph for directed graphs"
<< std::endl;
}
KatzCentralityDynamic::KatzCentralityDynamic(HornetGraph& hornet,
int max_iteration, int K,
degree_t max_degree) :
StaticAlgorithm(hornet),
load_balacing(hornet),
inverted_graph(inverted_graph),
is_directed(true),
kc_static(inverted_graph, max_iteration, K,
max_degree, true) {
hd_katzdata().active_queue.initialize(hornet);
gpu::allocate(hd_katzdata().new_paths_curr, hornet.nV());
gpu::allocate(hd_katzdata().new_paths_prev, hornet.nV());
gpu::allocate(hd_katzdata().active, hornet.nV());
hd_katzdata = kc_static.katz_data();
std::cout << "Oded remember to take care of memory de-allocation\n"
<< "Oded need to figure out correct API for dynamic graph"
<< "algorithms\n"
<< "Dynamic katz centrality algorithm needs to get both the"
<< "original graph and the inverted graph for directed graphs"
<< std::endl;
}
KatzCentralityDynamic::~KatzCentralityDynamic() {
release();
}
void KatzCentralityDynamic::run_static() {
// Executing the static graph algorithm
kc_static.reset();
kc_static.run();
hd_katzdata().iteration_static = hd_katzdata().iteration;
// Initializing the fields of the dynamic graph algorithm
forAllnumV(hornet, InitStreaming { hd_katzdata } );
}
void KatzCentralityDynamic::release(){
gpu::free(hd_katzdata().new_paths_curr);
gpu::free(hd_katzdata().new_paths_prev);
gpu::free(hd_katzdata().active);
}
//==============================================================================
void KatzCentralityDynamic::processUpdate(BatchUpdate& batch_update,
bool is_insert) {
// Resetting the queue of the active vertices.
hd_katzdata().active_queue.clear();
hd_katzdata().iteration = 1;
// Initialization of insertions or deletions is slightly different.
if (is_insert)
forAllEdges(hornet, batch_update, SetupInsertions { hd_katzdata });
else
forAllEdges(hornet, batch_update, SetupDeletions { hd_katzdata } );
hd_katzdata.sync();
hd_katzdata().iteration = 2;
while (hd_katzdata().iteration < hd_katzdata().max_iteration &&
hd_katzdata().iteration < hd_katzdata().iteration_static) {
hd_katzdata().alphaI = std::pow(hd_katzdata().alpha,
hd_katzdata().iteration);
forAll(hd_katzdata().active_queue, //hd_katzdata().num_active
InitActiveNewPaths { hd_katzdata });
// Undirected graphs and directed graphs need to be dealt with differently.
if (!is_directed) {
forAllEdges(hornet, hd_katzdata().active_queue,
FindNextActive { hd_katzdata }, load_balacing);
hd_katzdata.sync(); // Syncing queue info
forAllEdges(hornet, hd_katzdata().active_queue,
UpdateActiveNewPaths { hd_katzdata },
load_balacing );
}
else {
forAllEdges(inverted_graph, hd_katzdata().active_queue,
FindNextActive { hd_katzdata }, load_balacing);
hd_katzdata.sync();
forAllEdges(inverted_graph, hd_katzdata().active_queue,
UpdateActiveNewPaths { hd_katzdata }, load_balacing);
}
hd_katzdata.sync(); // Syncing queue info
// Checking if we are dealing with a batch of insertions or deletions.
if (is_insert) {
forAllEdges(hornet, batch_update,
UpdateNewPathsBatchInsert { hd_katzdata });
}
else {
forAllEdges(hornet, batch_update,
UpdateNewPathsBatchDelete { hd_katzdata });
}
hd_katzdata.sync();
forAll(hd_katzdata().active_queue, UpdatePrevWithCurr { hd_katzdata });
hd_katzdata.sync();
hd_katzdata().iteration++;
}
if (hd_katzdata().iteration > 2) {
forAll(hd_katzdata().active_queue, //hd_katzdata().num_active?
UpdateLastIteration { hd_katzdata } );
hd_katzdata.sync();
}
// Resetting the fields of the dynamic graph algorithm for all the vertices
// that were active
//hd_katzdata().num_active ??
forAll(hd_katzdata().active_queue, InitStreaming {hd_katzdata});
}
//------------------------------------------------------------------------------
int KatzCentralityDynamic::get_iteration_count(){
return hd_katzdata().iteration;
}
void KatzCentralityDynamic::batchUpdateInserted(BatchUpdate &batch_update) {
processUpdate(batch_update, true);
}
void KatzCentralityDynamic::batchUpdateDeleted(BatchUpdate &batch_update) {
processUpdate(batch_update, false);
}
void KatzCentralityDynamic::copyKCToHost(double* host_array) {
kc_static.copyKCToHost(host_array);
}
void KatzCentralityDynamic::copyNumPathsToHost(ulong_t* host_array) {
kc_static.copyNumPathsToHost(host_array);
}
}// cuStingerAlgs namespace
|
491ac7ed55fef73abb5a2d04173f36ddb3a6ae90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
{
__global__ void Dstanh_32(const int lengthX, const float sf, const float *gradc, const float *fc, float *gradn)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
gradn[i] += sf*gradc[i]*(1.0-(fc[i]/sf)*(fc[i]/sf));
}
}
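/* Math note (comment added for clarity): for the scaled tanh forward pass
 * f(x) = sf * tanh(x), the derivative is
 * f'(x) = sf * (1 - tanh(x)^2) = sf * (1 - (f(x)/sf)^2).
 * The kernel above therefore accumulates gradn[i] += gradc[i] * f'(x) by the
 * chain rule, reconstructing tanh(x) from the stored forward value as
 * fc[i]/sf instead of recomputing it. */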
} | 491ac7ed55fef73abb5a2d04173f36ddb3a6ae90.cu | extern "C"
{
__global__ void Dstanh_32(const int lengthX, const float sf, const float *gradc, const float *fc, float *gradn)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthX)
{
gradn[i] += sf*gradc[i]*(1.0-(fc[i]/sf)*(fc[i]/sf));
}
}
} |
clover_quda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <clover_field.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <clover_field_order.h>
namespace quda {
#ifdef GPU_CLOVER_DIRAC
template<typename Float, typename Clover, typename Fmunu>
struct CloverArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
double cloverCoeff;
Clover clover;
Fmunu f;
CloverArg(Clover &clover, Fmunu& f, const GaugeField &meta, double cloverCoeff)
: threads(meta.Volume()),
cloverCoeff(cloverCoeff),
clover(clover),
f(f)
{
for(int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir];
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
border[dir] = 2;
}
#endif
}
};
/*
Put into clover order
Upper-left block (chirality index 0)
/ \
| 1 + c*I*(F[0,1] - F[2,3]) , c*I*(F[1,2] - F[0,3]) + c*(F[0,2] + F[1,3]) |
| |
| c*I*(F[1,2] - F[0,3]) - c*(F[0,2] + F[1,3]), 1 - c*I*(F[0,1] - F[2,3]) |
| |
\ /
       /                                                                    \
       | 1 - c*I*(F[0] - F[5]),        -c*I*(F[2] - F[3]) - c*(F[1] + F[4]) |
       |                                                                    |
       | -c*I*(F[2] - F[3]) + c*(F[1] + F[4]),        1 + c*I*(F[0] - F[5]) |
       \                                                                    /
Lower-right block (chirality index 1)
/ \
| 1 - c*I*(F[0] + F[5]), -c*I*(F[2] + F[3]) - c*(F[1] - F[4]) |
| |
| -c*I*(F[2]+F[3]) + c*(F[1]-F[4]), 1 + c*I*(F[0] + F[5]) |
\ /
*/
// Core routine for constructing clover term from field strength
template<typename Float, typename Clover, typename Fmunu>
__device__ __host__
void cloverComputeCore(CloverArg<Float,Clover,Fmunu>& arg, int idx){
int parity = 0;
if(idx >= arg.threads/2){
parity = 1;
idx -= arg.threads/2;
}
typedef complex<Float> Complex;
// Load the field-strength tensor from global memory
Matrix<Complex,3> F[6];
for(int i=0; i<6; ++i){
arg.f.load((Float*)(F[i].data), idx, i, parity);
}
Complex I; I.x = 0; I.y = 1.0;
Complex coeff; coeff.x = 0; coeff.y = arg.cloverCoeff;
Matrix<Complex,3> block1[2];
Matrix<Complex,3> block2[2];
block1[0] = coeff*(F[0]-F[5]); // (18 + 6*9=) 72 floating-point ops
block1[1] = coeff*(F[0]+F[5]); // 72 floating-point ops
block2[0] = arg.cloverCoeff*(F[1]+F[4] - I*(F[2]-F[3])); // 126 floating-point ops
block2[1] = arg.cloverCoeff*(F[1]-F[4] - I*(F[2]+F[3])); // 126 floating-point ops
const int idtab[15]={0,1,3,6,10,2,4,7,11,5,8,12,9,13,14};
Float diag[6];
Complex triangle[15];
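    // Storage note (comment added for clarity): each chiral block is packed
    // as 36 floats -- the 6 real diagonal entries followed by the 15 complex
    // lower-triangular entries (30 floats). idtab[] permutes the row-major
    // triangle indices filled in below into the order expected by
    // arg.clover.save(), and the factor 0.5 applied when filling A[] is the
    // conventional clover normalization.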
// This uses lots of unnecessary memory
for(int ch=0; ch<2; ++ch){
// c = 0(1) => positive(negative) chiral block
// Compute real diagonal elements
for(int i=0; i<3; ++i){
diag[i] = 1.0 - block1[ch](i,i).x;
diag[i+3] = 1.0 + block1[ch](i,i).x;
}
// Compute off diagonal components
// First row
triangle[0] = - block1[ch](1,0);
// Second row
triangle[1] = - block1[ch](2,0);
triangle[2] = - block1[ch](2,1);
// Third row
triangle[3] = block2[ch](0,0);
triangle[4] = block2[ch](0,1);
triangle[5] = block2[ch](0,2);
// Fourth row
triangle[6] = block2[ch](1,0);
triangle[7] = block2[ch](1,1);
triangle[8] = block2[ch](1,2);
triangle[9] = block1[ch](1,0);
// Fifth row
triangle[10] = block2[ch](2,0);
triangle[11] = block2[ch](2,1);
triangle[12] = block2[ch](2,2);
triangle[13] = block1[ch](2,0);
triangle[14] = block1[ch](2,1);
Float A[36];
for(int i=0; i<6; ++i) A[i] = static_cast<Float>(0.5)*diag[i];
for(int i=0; i<15; ++i){
A[6+2*i] = 0.5*triangle[idtab[i]].x;
A[6+2*i + 1] = 0.5*triangle[idtab[i]].y;
}
arg.clover.save(A, idx, parity, ch);
} // ch
// 84 floating-point ops
return;
}
template<typename Float, typename Clover, typename Fmunu>
__global__
void cloverComputeKernel(CloverArg<Float,Clover,Fmunu> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
cloverComputeCore(arg, idx);
}
template<typename Float, typename Clover, typename Fmunu>
void cloverComputeCPU(CloverArg<Float,Clover,Fmunu> arg){
for(int idx=0; idx<arg.threads; ++idx){
cloverComputeCore(arg, idx);
}
}
template<typename Float, typename Clover, typename Fmunu>
class CloverCompute : Tunable {
CloverArg<Float,Clover,Fmunu> arg;
const GaugeField &meta;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune the shared memory.
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
CloverCompute(CloverArg<Float,Clover,Fmunu> &arg, const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("threads=%d,stride=%d,prec=%lu",arg.threads,arg.clover.stride,sizeof(Float));
}
virtual ~CloverCompute() {}
void apply(const hipStream_t &stream) {
if(location == QUDA_CUDA_FIELD_LOCATION){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
hipLaunchKernelGGL(( cloverComputeKernel), dim3(tp.grid),dim3(tp.block),tp.shared_bytes, 0, arg);
} else { // run the CPU code
cloverComputeCPU(arg);
}
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), typeid(*this).name(), aux);
}
long long flops() const { return 480*arg.threads; }
long long bytes() const { return arg.threads*(6*18 + 72)*sizeof(Float); }
};
template<typename Float, typename Clover, typename Fmunu>
void computeClover(Clover clover, Fmunu f, const GaugeField &meta, Float cloverCoeff, QudaFieldLocation location){
CloverArg<Float,Clover,Fmunu> arg(clover, f, meta, cloverCoeff);
CloverCompute<Float,Clover,Fmunu> cloverCompute(arg, meta, location);
cloverCompute.apply(0);
checkCudaError();
hipDeviceSynchronize();
}
template<typename Float>
void computeClover(CloverField &clover, const GaugeField &f, Float cloverCoeff, QudaFieldLocation location){
if (f.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (clover.isNative()) {
typedef typename clover_mapper<Float>::type C;
computeClover(C(clover,0), gauge::FloatNOrder<Float,18,2,18>(f), f, cloverCoeff, location);
} else {
errorQuda("Clover field order %d not supported", clover.Order());
} // clover order
} else {
errorQuda("Fmunu field order %d not supported", f.Precision());
}
}
#endif
void computeClover(CloverField &clover, const GaugeField& f, double cloverCoeff, QudaFieldLocation location){
#ifdef GPU_CLOVER_DIRAC
if(clover.Precision() != f.Precision()){
errorQuda("Fmunu precision %d must match gauge precision %d", clover.Precision(), f.Precision());
}
if (clover.Precision() == QUDA_DOUBLE_PRECISION){
computeClover<double>(clover, f, cloverCoeff, location);
} else if(clover.Precision() == QUDA_SINGLE_PRECISION) {
computeClover<float>(clover, f, cloverCoeff, location);
} else {
errorQuda("Precision %d not supported", clover.Precision());
}
return;
#else
errorQuda("Clover has not been built");
#endif
}
} // namespace quda
| clover_quda.cu | #include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <clover_field.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <clover_field_order.h>
namespace quda {
#ifdef GPU_CLOVER_DIRAC
template<typename Float, typename Clover, typename Fmunu>
struct CloverArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
#ifdef MULTI_GPU
int border[4];
#endif
double cloverCoeff;
Clover clover;
Fmunu f;
CloverArg(Clover &clover, Fmunu& f, const GaugeField &meta, double cloverCoeff)
: threads(meta.Volume()),
cloverCoeff(cloverCoeff),
clover(clover),
f(f)
{
for(int dir=0; dir<4; ++dir) X[dir] = meta.X()[dir];
#ifdef MULTI_GPU
for(int dir=0; dir<4; ++dir){
border[dir] = 2;
}
#endif
}
};
/*
Put into clover order
Upper-left block (chirality index 0)
/ \
| 1 + c*I*(F[0,1] - F[2,3]) , c*I*(F[1,2] - F[0,3]) + c*(F[0,2] + F[1,3]) |
| |
| c*I*(F[1,2] - F[0,3]) - c*(F[0,2] + F[1,3]), 1 - c*I*(F[0,1] - F[2,3]) |
| |
\ /
       /                                                                    \
       | 1 - c*I*(F[0] - F[5]),        -c*I*(F[2] - F[3]) - c*(F[1] + F[4]) |
       |                                                                    |
       | -c*I*(F[2] - F[3]) + c*(F[1] + F[4]),        1 + c*I*(F[0] - F[5]) |
       \                                                                    /
Lower-right block (chirality index 1)
/ \
| 1 - c*I*(F[0] + F[5]), -c*I*(F[2] + F[3]) - c*(F[1] - F[4]) |
| |
| -c*I*(F[2]+F[3]) + c*(F[1]-F[4]), 1 + c*I*(F[0] + F[5]) |
\ /
*/
// Core routine for constructing clover term from field strength
template<typename Float, typename Clover, typename Fmunu>
__device__ __host__
void cloverComputeCore(CloverArg<Float,Clover,Fmunu>& arg, int idx){
int parity = 0;
if(idx >= arg.threads/2){
parity = 1;
idx -= arg.threads/2;
}
typedef complex<Float> Complex;
// Load the field-strength tensor from global memory
Matrix<Complex,3> F[6];
for(int i=0; i<6; ++i){
arg.f.load((Float*)(F[i].data), idx, i, parity);
}
Complex I; I.x = 0; I.y = 1.0;
Complex coeff; coeff.x = 0; coeff.y = arg.cloverCoeff;
Matrix<Complex,3> block1[2];
Matrix<Complex,3> block2[2];
block1[0] = coeff*(F[0]-F[5]); // (18 + 6*9=) 72 floating-point ops
block1[1] = coeff*(F[0]+F[5]); // 72 floating-point ops
block2[0] = arg.cloverCoeff*(F[1]+F[4] - I*(F[2]-F[3])); // 126 floating-point ops
block2[1] = arg.cloverCoeff*(F[1]-F[4] - I*(F[2]+F[3])); // 126 floating-point ops
const int idtab[15]={0,1,3,6,10,2,4,7,11,5,8,12,9,13,14};
Float diag[6];
Complex triangle[15];
// This uses lots of unnecessary memory
for(int ch=0; ch<2; ++ch){
// c = 0(1) => positive(negative) chiral block
// Compute real diagonal elements
for(int i=0; i<3; ++i){
diag[i] = 1.0 - block1[ch](i,i).x;
diag[i+3] = 1.0 + block1[ch](i,i).x;
}
// Compute off diagonal components
// First row
triangle[0] = - block1[ch](1,0);
// Second row
triangle[1] = - block1[ch](2,0);
triangle[2] = - block1[ch](2,1);
// Third row
triangle[3] = block2[ch](0,0);
triangle[4] = block2[ch](0,1);
triangle[5] = block2[ch](0,2);
// Fourth row
triangle[6] = block2[ch](1,0);
triangle[7] = block2[ch](1,1);
triangle[8] = block2[ch](1,2);
triangle[9] = block1[ch](1,0);
// Fifth row
triangle[10] = block2[ch](2,0);
triangle[11] = block2[ch](2,1);
triangle[12] = block2[ch](2,2);
triangle[13] = block1[ch](2,0);
triangle[14] = block1[ch](2,1);
Float A[36];
for(int i=0; i<6; ++i) A[i] = static_cast<Float>(0.5)*diag[i];
for(int i=0; i<15; ++i){
A[6+2*i] = 0.5*triangle[idtab[i]].x;
A[6+2*i + 1] = 0.5*triangle[idtab[i]].y;
}
arg.clover.save(A, idx, parity, ch);
} // ch
// 84 floating-point ops
return;
}
template<typename Float, typename Clover, typename Fmunu>
__global__
void cloverComputeKernel(CloverArg<Float,Clover,Fmunu> arg){
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if(idx >= arg.threads) return;
cloverComputeCore(arg, idx);
}
template<typename Float, typename Clover, typename Fmunu>
void cloverComputeCPU(CloverArg<Float,Clover,Fmunu> arg){
for(int idx=0; idx<arg.threads; ++idx){
cloverComputeCore(arg, idx);
}
}
template<typename Float, typename Clover, typename Fmunu>
class CloverCompute : Tunable {
CloverArg<Float,Clover,Fmunu> arg;
const GaugeField &meta;
const QudaFieldLocation location;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const { return 0; }
bool tuneSharedBytes() const { return false; } // Don't tune the shared memory.
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return arg.threads; }
public:
CloverCompute(CloverArg<Float,Clover,Fmunu> &arg, const GaugeField &meta, QudaFieldLocation location)
: arg(arg), meta(meta), location(location) {
writeAuxString("threads=%d,stride=%d,prec=%lu",arg.threads,arg.clover.stride,sizeof(Float));
}
virtual ~CloverCompute() {}
void apply(const cudaStream_t &stream) {
if(location == QUDA_CUDA_FIELD_LOCATION){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
cloverComputeKernel<<<tp.grid,tp.block,tp.shared_bytes>>>(arg);
} else { // run the CPU code
cloverComputeCPU(arg);
}
}
TuneKey tuneKey() const {
return TuneKey(meta.VolString(), typeid(*this).name(), aux);
}
long long flops() const { return 480*arg.threads; }
long long bytes() const { return arg.threads*(6*18 + 72)*sizeof(Float); }
};
template<typename Float, typename Clover, typename Fmunu>
void computeClover(Clover clover, Fmunu f, const GaugeField &meta, Float cloverCoeff, QudaFieldLocation location){
CloverArg<Float,Clover,Fmunu> arg(clover, f, meta, cloverCoeff);
CloverCompute<Float,Clover,Fmunu> cloverCompute(arg, meta, location);
cloverCompute.apply(0);
checkCudaError();
cudaDeviceSynchronize();
}
template<typename Float>
void computeClover(CloverField &clover, const GaugeField &f, Float cloverCoeff, QudaFieldLocation location){
if (f.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (clover.isNative()) {
typedef typename clover_mapper<Float>::type C;
computeClover(C(clover,0), gauge::FloatNOrder<Float,18,2,18>(f), f, cloverCoeff, location);
} else {
errorQuda("Clover field order %d not supported", clover.Order());
} // clover order
} else {
errorQuda("Fmunu field order %d not supported", f.Precision());
}
}
#endif
void computeClover(CloverField &clover, const GaugeField& f, double cloverCoeff, QudaFieldLocation location){
#ifdef GPU_CLOVER_DIRAC
if(clover.Precision() != f.Precision()){
errorQuda("Fmunu precision %d must match gauge precision %d", clover.Precision(), f.Precision());
}
if (clover.Precision() == QUDA_DOUBLE_PRECISION){
computeClover<double>(clover, f, cloverCoeff, location);
} else if(clover.Precision() == QUDA_SINGLE_PRECISION) {
computeClover<float>(clover, f, cloverCoeff, location);
} else {
errorQuda("Precision %d not supported", clover.Precision());
}
return;
#else
errorQuda("Clover has not been built");
#endif
}
} // namespace quda
|
e37329b0669c4ed69f568e26c66f0122d6292917.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/random_mask_generator.h"
namespace oneflow {
namespace {
constexpr int32_t kMinPackPerThread = 2;
using PackType = ulonglong2;
union Pack {
PackType p_value;
int8_t b_value[sizeof(PackType)];
};
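// Note (comment added for clarity): sizeof(PackType) is 16 bytes, so each
// loop iteration in GenerateGpu below assembles 16 int8 mask values in a
// Pack and writes them back with a single 16-byte vectorized store instead
// of 16 scalar byte stores.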
int GetThreadNum(const hipDeviceProp_t& prop) {
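  // Heuristic (comment added for clarity): use 2x the number of CUDA cores
  // per SM of the detected architecture (Kepler: 192 cores/SM, Maxwell: 128,
  // Pascal: 128 for minor revisions 1/2 and 64 otherwise, Volta/Turing: 64)
  // as the thread-block size, so each SM stays fully occupied.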
switch (prop.major) {
case 3: // Kepler
return 2 * 192;
case 5: // Maxwell
return 2 * 128;
case 6: // Pascal
if ((prop.minor == 1) || (prop.minor == 2)) {
return 2 * 128;
} else {
return 2 * 64;
}
case 7: // Volta and Turing
return 2 * 64;
default: return 2 * 64;
}
}
__device__ int8_t GenMask(hiprandState_t* state, const float rate) {
return hiprand_uniform(state) >= rate;
}
__global__ void SetupKernel(int64_t seed, hiprandState_t* state) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
size_t local_seed = (static_cast<size_t>(seed) + 0x9e3779b9U + (static_cast<size_t>(id) << 6U)
+ (static_cast<size_t>(id) >> 2U));
hiprand_init(local_seed, 0, 0, &state[id]);
}
__global__ void GenerateGpu(hiprandState_t* state, const int64_t n, const float rate, int8_t* mask) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
hiprandState_t localState = state[id];
PackType* pack_mask = reinterpret_cast<PackType*>(mask);
Pack pack;
CUDA_1D_KERNEL_LOOP(i, n / sizeof(PackType)) {
#pragma unroll
for (int j = 0; j < sizeof(PackType); ++j) { pack.b_value[j] = GenMask(&localState, rate); }
pack_mask[i] = pack.p_value;
}
const int32_t rem_cnt = n % sizeof(PackType);
const int32_t rem_offset = n - rem_cnt;
if (id < rem_cnt) { mask[id + rem_offset] = GenMask(&localState, rate); }
state[id] = localState;
}
} // namespace
RandomMaskGenerator<DeviceType::kGPU>::RandomMaskGenerator(int64_t seed) {
hipDeviceProp_t prop;
OF_CUDA_CHECK(hipGetDeviceProperties(&prop, 0));
block_num_ = prop.multiProcessorCount;
thread_num_ = GetThreadNum(prop);
OF_CUDA_CHECK(hipMalloc(&curand_states_, block_num_ * thread_num_ * sizeof(hiprandState_t)));
hipLaunchKernelGGL(( SetupKernel), dim3(block_num_), dim3(thread_num_), 0, 0, seed, curand_states_);
}
RandomMaskGenerator<DeviceType::kGPU>::~RandomMaskGenerator() {
OF_CUDA_CHECK(hipFree(curand_states_));
}
void RandomMaskGenerator<DeviceType::kGPU>::Generate(DeviceCtx* device_ctx, const int64_t n,
const float rate, int8_t* mask) {
const int32_t elem_cnt_per_block = thread_num_ * sizeof(PackType) * kMinPackPerThread;
const int32_t block_num =
::min(static_cast<int32_t>((n + elem_cnt_per_block - 1) / elem_cnt_per_block), block_num_);
hipLaunchKernelGGL(( GenerateGpu), dim3(block_num), dim3(thread_num_), 0, device_ctx->cuda_stream(), curand_states_, n, rate,
mask);
}
template class RandomMaskGenerator<DeviceType::kGPU>;
} // namespace oneflow
| e37329b0669c4ed69f568e26c66f0122d6292917.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/random_mask_generator.h"
namespace oneflow {
namespace {
constexpr int32_t kMinPackPerThread = 2;
using PackType = ulonglong2;
union Pack {
PackType p_value;
int8_t b_value[sizeof(PackType)];
};
int GetThreadNum(const cudaDeviceProp& prop) {
switch (prop.major) {
case 3: // Kepler
return 2 * 192;
case 5: // Maxwell
return 2 * 128;
case 6: // Pascal
if ((prop.minor == 1) || (prop.minor == 2)) {
return 2 * 128;
} else {
return 2 * 64;
}
case 7: // Volta and Turing
return 2 * 64;
default: return 2 * 64;
}
}
__device__ int8_t GenMask(curandState* state, const float rate) {
return curand_uniform(state) >= rate;
}
__global__ void SetupKernel(int64_t seed, curandState* state) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
size_t local_seed = (static_cast<size_t>(seed) + 0x9e3779b9U + (static_cast<size_t>(id) << 6U)
+ (static_cast<size_t>(id) >> 2U));
curand_init(local_seed, 0, 0, &state[id]);
}
__global__ void GenerateGpu(curandState* state, const int64_t n, const float rate, int8_t* mask) {
const int id = blockIdx.x * blockDim.x + threadIdx.x;
curandState localState = state[id];
PackType* pack_mask = reinterpret_cast<PackType*>(mask);
Pack pack;
CUDA_1D_KERNEL_LOOP(i, n / sizeof(PackType)) {
#pragma unroll
for (int j = 0; j < sizeof(PackType); ++j) { pack.b_value[j] = GenMask(&localState, rate); }
pack_mask[i] = pack.p_value;
}
const int32_t rem_cnt = n % sizeof(PackType);
const int32_t rem_offset = n - rem_cnt;
if (id < rem_cnt) { mask[id + rem_offset] = GenMask(&localState, rate); }
state[id] = localState;
}
} // namespace
RandomMaskGenerator<DeviceType::kGPU>::RandomMaskGenerator(int64_t seed) {
cudaDeviceProp prop;
OF_CUDA_CHECK(cudaGetDeviceProperties(&prop, 0));
block_num_ = prop.multiProcessorCount;
thread_num_ = GetThreadNum(prop);
OF_CUDA_CHECK(cudaMalloc(&curand_states_, block_num_ * thread_num_ * sizeof(curandState)));
SetupKernel<<<block_num_, thread_num_>>>(seed, curand_states_);
}
RandomMaskGenerator<DeviceType::kGPU>::~RandomMaskGenerator() {
OF_CUDA_CHECK(cudaFree(curand_states_));
}
void RandomMaskGenerator<DeviceType::kGPU>::Generate(DeviceCtx* device_ctx, const int64_t n,
const float rate, int8_t* mask) {
const int32_t elem_cnt_per_block = thread_num_ * sizeof(PackType) * kMinPackPerThread;
const int32_t block_num =
std::min(static_cast<int32_t>((n + elem_cnt_per_block - 1) / elem_cnt_per_block), block_num_);
GenerateGpu<<<block_num, thread_num_, 0, device_ctx->cuda_stream()>>>(curand_states_, n, rate,
mask);
}
template class RandomMaskGenerator<DeviceType::kGPU>;
} // namespace oneflow
|
d9332fe613bb89315acc56eae7f233b2ec6a3d06.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include "cudaKernel.h"
#include "Perceptron.h"
hipError_t memcpyDoubleArrayToHost(double **dest, double **src, int n) {
hipError_t cudaStatus = hipSuccess;
cudaStatus = hipMemcpy(*dest, *src, n * sizeof(double), hipMemcpyDeviceToHost);
CHECK_ERRORS(cudaStatus, "hipMemcpy - double failed\n", hipErrorUnknown);
return cudaStatus;
}
hipError_t memcpy_double_array_to_device(double **dest, double **src, int n) {
hipError_t cudaStatus = hipSuccess;
cudaStatus = hipMemcpy(*dest, *src, n * sizeof(double), hipMemcpyHostToDevice);
CHECK_ERRORS(cudaStatus, "hipMemcpy - double failed\n", hipErrorUnknown);
return cudaStatus;
}
hipError_t memcpy_point_array_to_device(Point **dest, Point **src, int n) {
hipError_t cudaStatus = hipSuccess;
cudaStatus = hipMemcpy(*dest, *src, n * sizeof(Point), hipMemcpyHostToDevice);
CHECK_ERRORS(cudaStatus, "hipMemcpy - Point failed\n", hipErrorUnknown);
return cudaStatus;
}
__global__ void count_correct_points_kernel(int *result, int *sum_results, int size) {
int i, index = threadIdx.x;
sum_results[index] = 0;
int chunk_size = NUM_CUDA_CORES;
int start_index = threadIdx.x * chunk_size;
for (i = start_index; i < start_index + chunk_size; i++) {
if (i >= size)
break;
if (result[i] != POINT_CORRECT) {
sum_results[index]++;
}
}
}
__device__ void mult_scalar_with_vector_device(double* vector, int dim, double scalar, double* result_vector) {
for (int i = 0; i < dim; i++)
result_vector[i] = vector[i] * scalar;
}
__device__ void add_vector_to_vector_device(double* vector1, double* vector2, int dim, double* result_vector) {
for (int i = 0; i < dim; i++)
result_vector[i] = vector1[i] + vector2[i];
}
__device__ int sign_device(double val)
{
if (val >= 0)
return SET_A;
return SET_B;
}
__global__ void sum_count_results_kernel(int *sum_results, int size) {
int sum = 0;
for (int i = 0; i < size; i++)
{
sum += sum_results[i];
}
sum_results[0] = sum;
}
__device__ double mult_vector_with_vector_device(double* vector1, double* vector2, int dim) {
double result = vector1[0] * vector2[0];
for (int i = 1; i < dim; i++)
result += vector1[i] * vector2[i];
return result;
}
__global__ void f_on_GPU_kernel(int *result, Point* points, double* W, int N, int K) {
int index = threadIdx.x + blockIdx.x * NUM_CUDA_CORES;
if (index >= N)
return;
double val = mult_vector_with_vector_device(points[index].x, W, K + 1);
if (sign_device(val) != points[index].set)
result[index] = index;
else
result[index] = POINT_CORRECT;
}
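// Result encoding (comment added for clarity): result[index] holds the
// point's own index if the current weight vector W misclassifies it
// (sign_device(W*x) != label) and POINT_CORRECT otherwise; the counting
// kernels defined above then reduce this array to the number of
// misclassified points.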
hipError_t set_device()
{
hipError_t cudaStatus = hipSuccess;
cudaStatus = hipSetDevice(0);
CHECK_ERRORS(cudaStatus, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?", hipErrorUnknown);
return cudaStatus;
}
hipError_t cuda_malloc_double_by_size(double** arr, int arr_size)
{
set_device();
hipError_t cudaStatus = hipSuccess;
cudaStatus = hipMalloc((void**)arr, arr_size * sizeof(double));
CHECK_ERRORS(cudaStatus, "hipMalloc failed!", hipErrorUnknown);
return cudaStatus;
}
hipError_t cuda_malloc_point_by_size(Point** arr, int arr_size)
{
set_device();
hipError_t cudaStatus = hipSuccess;
cudaStatus = hipMalloc((void**)arr, arr_size * sizeof(Point));
CHECK_ERRORS(cudaStatus, "hipMalloc failed!", hipErrorUnknown);
return cudaStatus;
}
hipError_t free_cuda_point_array(Point** dev_points) {
hipError_t cudaStatus = hipSuccess;
set_device();
Point point_zero;
cudaStatus = hipMemcpy(&point_zero, (*dev_points), sizeof(Point), hipMemcpyDeviceToHost);
CHECK_ERRORS(cudaStatus, "cudaMemCpy failed!", hipErrorUnknown);
//freeing dev_points[0].x will free the rest of the points memory as well
cudaStatus = hipFree(point_zero.x);
CHECK_ERRORS(cudaStatus, "hipFree failed!", hipErrorUnknown);
cudaStatus = hipFree(*dev_points);
CHECK_ERRORS(cudaStatus, "hipFree failed!", hipErrorUnknown);
return cudaStatus;
}
hipError_t cuda_malloc_and_free_pointers_from_quality_function(int N, int K, int num_blocks, double** W_dev, int** device_results, int** sum_results, int malloc_flag)
{
static int is_last_malloc_flag = FREE_MALLOC_FLAG;
static double *W_dev_p = 0;
static int *device_results_p = 0, *sum_results_p = 0;
hipError_t cudaStatus = hipSuccess;
set_device();
if (!is_last_malloc_flag && malloc_flag == MALLOC_FLAG)
{
cudaStatus = hipMalloc((void**)W_dev, sizeof(double)*(K + 1));
CHECK_ERRORS(cudaStatus, "hipMalloc failed!\n", hipErrorUnknown);
cudaStatus = hipMalloc((void**)device_results, sizeof(int)*N);
CHECK_ERRORS(cudaStatus, "hipMalloc failed!\n", hipErrorUnknown);
cudaStatus = hipMalloc((void**)sum_results, sizeof(int)*num_blocks);
CHECK_ERRORS(cudaStatus, "hipMalloc failed!\n", hipErrorUnknown);
W_dev_p = *W_dev;
device_results_p = *device_results;
sum_results_p = *sum_results;
is_last_malloc_flag = MALLOC_FLAG;
}
else if (is_last_malloc_flag && malloc_flag == FREE_MALLOC_FLAG)
{
cudaStatus = hipFree(W_dev_p);
CHECK_ERRORS(cudaStatus, "hipFree failed!\n", hipErrorUnknown);
cudaStatus = hipFree(device_results_p);
CHECK_ERRORS(cudaStatus, "hipFree failed!\n", hipErrorUnknown);
cudaStatus = hipFree(sum_results_p);
CHECK_ERRORS(cudaStatus, "hipFree failed!\n", hipErrorUnknown);
is_last_malloc_flag = FREE_MALLOC_FLAG;
}
return cudaStatus;
}
hipError_t get_quality_with_GPU(Point* points, double* W, int N, int K, double* q) {
static int *device_results, *sum_results;
static double *W_dev;
int count;
int num_blocks = (int)ceil(N / (double)NUM_CUDA_CORES);
hipError_t cudaStatus = hipSuccess;
cuda_malloc_and_free_pointers_from_quality_function(N, K, num_blocks, &W_dev, &device_results, &sum_results, MALLOC_FLAG);
memcpy_double_array_to_device(&W_dev, &W, K + 1);
/*Do f on all points with adjusted W*/
hipLaunchKernelGGL(( f_on_GPU_kernel) , dim3(num_blocks), dim3(NUM_CUDA_CORES) , 0, 0, device_results, points, W_dev, N, K);
CHECK_AND_SYNC_ERRORS("fOnGPUKernel launch failed\n");
	/*count the number of misclassified points in each block*/
	hipLaunchKernelGGL(( count_correct_points_kernel) , dim3(1), dim3(num_blocks) , 0, 0, device_results, sum_results, N);
	CHECK_AND_SYNC_ERRORS("count_correct_points_kernel launch failed\n");
	/*total number of misclassified points ends up in sum_results[0] - the sum of the per-block sums*/
	hipLaunchKernelGGL(( sum_count_results_kernel) , dim3(1), dim3(1) , 0, 0, sum_results, num_blocks);
	CHECK_AND_SYNC_ERRORS("sum_count_results_kernel launch failed\n");
cudaStatus = hipMemcpy(&count, &(sum_results[0]), sizeof(int), hipMemcpyDeviceToHost);
CHECK_ERRORS(cudaStatus, "Cudamemcpy failed\n", hipErrorUnknown);
*q = (count / (double)N);
return cudaStatus;
} | d9332fe613bb89315acc56eae7f233b2ec6a3d06.cu |
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include "cudaKernel.h"
#include "Perceptron.h"
cudaError_t memcpyDoubleArrayToHost(double **dest, double **src, int n) {
cudaError_t cudaStatus = cudaSuccess;
cudaStatus = cudaMemcpy(*dest, *src, n * sizeof(double), cudaMemcpyDeviceToHost);
CHECK_ERRORS(cudaStatus, "cudaMemcpy - double failed\n", cudaErrorUnknown);
return cudaStatus;
}
cudaError_t memcpy_double_array_to_device(double **dest, double **src, int n) {
cudaError_t cudaStatus = cudaSuccess;
cudaStatus = cudaMemcpy(*dest, *src, n * sizeof(double), cudaMemcpyHostToDevice);
CHECK_ERRORS(cudaStatus, "cudaMemcpy - double failed\n", cudaErrorUnknown);
return cudaStatus;
}
cudaError_t memcpy_point_array_to_device(Point **dest, Point **src, int n) {
cudaError_t cudaStatus = cudaSuccess;
cudaStatus = cudaMemcpy(*dest, *src, n * sizeof(Point), cudaMemcpyHostToDevice);
CHECK_ERRORS(cudaStatus, "cudaMemcpy - Point failed\n", cudaErrorUnknown);
return cudaStatus;
}
__global__ void count_correct_points_kernel(int *result, int *sum_results, int size) {
int i, index = threadIdx.x;
sum_results[index] = 0;
int chunk_size = NUM_CUDA_CORES;
int start_index = threadIdx.x * chunk_size;
for (i = start_index; i < start_index + chunk_size; i++) {
if (i >= size)
break;
if (result[i] != POINT_CORRECT) {
sum_results[index]++;
}
}
}
__device__ void mult_scalar_with_vector_device(double* vector, int dim, double scalar, double* result_vector) {
for (int i = 0; i < dim; i++)
result_vector[i] = vector[i] * scalar;
}
__device__ void add_vector_to_vector_device(double* vector1, double* vector2, int dim, double* result_vector) {
for (int i = 0; i < dim; i++)
result_vector[i] = vector1[i] + vector2[i];
}
__device__ int sign_device(double val)
{
if (val >= 0)
return SET_A;
return SET_B;
}
__global__ void sum_count_results_kernel(int *sum_results, int size) {
int sum = 0;
for (int i = 0; i < size; i++)
{
sum += sum_results[i];
}
sum_results[0] = sum;
}
__device__ double mult_vector_with_vector_device(double* vector1, double* vector2, int dim) {
double result = vector1[0] * vector2[0];
for (int i = 1; i < dim; i++)
result += vector1[i] * vector2[i];
return result;
}
__global__ void f_on_GPU_kernel(int *result, Point* points, double* W, int N, int K) {
int index = threadIdx.x + blockIdx.x * NUM_CUDA_CORES;
if (index >= N)
return;
double val = mult_vector_with_vector_device(points[index].x, W, K + 1);
if (sign_device(val) != points[index].set)
result[index] = index;
else
result[index] = POINT_CORRECT;
}
cudaError_t set_device()
{
cudaError_t cudaStatus = cudaSuccess;
cudaStatus = cudaSetDevice(0);
CHECK_ERRORS(cudaStatus, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?", cudaErrorUnknown);
return cudaStatus;
}
cudaError_t cuda_malloc_double_by_size(double** arr, int arr_size)
{
set_device();
cudaError_t cudaStatus = cudaSuccess;
cudaStatus = cudaMalloc((void**)arr, arr_size * sizeof(double));
CHECK_ERRORS(cudaStatus, "cudaMalloc failed!", cudaErrorUnknown);
return cudaStatus;
}
cudaError_t cuda_malloc_point_by_size(Point** arr, int arr_size)
{
set_device();
cudaError_t cudaStatus = cudaSuccess;
cudaStatus = cudaMalloc((void**)arr, arr_size * sizeof(Point));
CHECK_ERRORS(cudaStatus, "cudaMalloc failed!", cudaErrorUnknown);
return cudaStatus;
}
cudaError_t free_cuda_point_array(Point** dev_points) {
cudaError_t cudaStatus = cudaSuccess;
set_device();
Point point_zero;
cudaStatus = cudaMemcpy(&point_zero, (*dev_points), sizeof(Point), cudaMemcpyDeviceToHost);
CHECK_ERRORS(cudaStatus, "cudaMemCpy failed!", cudaErrorUnknown);
//freeing dev_points[0].x will free the rest of the points memory as well
cudaStatus = cudaFree(point_zero.x);
CHECK_ERRORS(cudaStatus, "cudaFree failed!", cudaErrorUnknown);
cudaStatus = cudaFree(*dev_points);
CHECK_ERRORS(cudaStatus, "cudaFree failed!", cudaErrorUnknown);
return cudaStatus;
}
cudaError_t cuda_malloc_and_free_pointers_from_quality_function(int N, int K, int num_blocks, double** W_dev, int** device_results, int** sum_results, int malloc_flag)
{
static int is_last_malloc_flag = FREE_MALLOC_FLAG;
static double *W_dev_p = 0;
static int *device_results_p = 0, *sum_results_p = 0;
cudaError_t cudaStatus = cudaSuccess;
set_device();
if (!is_last_malloc_flag && malloc_flag == MALLOC_FLAG)
{
cudaStatus = cudaMalloc((void**)W_dev, sizeof(double)*(K + 1));
CHECK_ERRORS(cudaStatus, "cudaMalloc failed!\n", cudaErrorUnknown);
cudaStatus = cudaMalloc((void**)device_results, sizeof(int)*N);
CHECK_ERRORS(cudaStatus, "cudaMalloc failed!\n", cudaErrorUnknown);
cudaStatus = cudaMalloc((void**)sum_results, sizeof(int)*num_blocks);
CHECK_ERRORS(cudaStatus, "cudaMalloc failed!\n", cudaErrorUnknown);
W_dev_p = *W_dev;
device_results_p = *device_results;
sum_results_p = *sum_results;
is_last_malloc_flag = MALLOC_FLAG;
}
else if (is_last_malloc_flag && malloc_flag == FREE_MALLOC_FLAG)
{
cudaStatus = cudaFree(W_dev_p);
CHECK_ERRORS(cudaStatus, "cudaFree failed!\n", cudaErrorUnknown);
cudaStatus = cudaFree(device_results_p);
CHECK_ERRORS(cudaStatus, "cudaFree failed!\n", cudaErrorUnknown);
cudaStatus = cudaFree(sum_results_p);
CHECK_ERRORS(cudaStatus, "cudaFree failed!\n", cudaErrorUnknown);
is_last_malloc_flag = FREE_MALLOC_FLAG;
}
return cudaStatus;
}
cudaError_t get_quality_with_GPU(Point* points, double* W, int N, int K, double* q) {
static int *device_results, *sum_results;
static double *W_dev;
int count;
int num_blocks = (int)ceil(N / (double)NUM_CUDA_CORES);
cudaError_t cudaStatus = cudaSuccess;
cuda_malloc_and_free_pointers_from_quality_function(N, K, num_blocks, &W_dev, &device_results, &sum_results, MALLOC_FLAG);
memcpy_double_array_to_device(&W_dev, &W, K + 1);
/*Do f on all points with adjusted W*/
f_on_GPU_kernel <<<num_blocks, NUM_CUDA_CORES >>> (device_results, points, W_dev, N, K);
CHECK_AND_SYNC_ERRORS("fOnGPUKernel launch failed\n");
	/*count the number of misclassified points in each block*/
	count_correct_points_kernel <<<1, num_blocks >>> (device_results, sum_results, N);
	CHECK_AND_SYNC_ERRORS("count_correct_points_kernel launch failed\n");
	/*total number of misclassified points ends up in sum_results[0] - the sum of the per-block sums*/
	sum_count_results_kernel <<<1, 1 >>> (sum_results, num_blocks);
	CHECK_AND_SYNC_ERRORS("sum_count_results_kernel launch failed\n");
cudaStatus = cudaMemcpy(&count, &(sum_results[0]), sizeof(int), cudaMemcpyDeviceToHost);
CHECK_ERRORS(cudaStatus, "Cudamemcpy failed\n", cudaErrorUnknown);
*q = (count / (double)N);
return cudaStatus;
} |
91ea618e9a409de12988fa11592d9c618d8dae48.hip | // !!! This is a file automatically generated by hipify!!!
// library initialization and finalization
#include <hip/hip_runtime.h>
#include "test.h"
#include "mqx.h"
int test_init_fini()
{
MQX_TPRINT("In test_init_fini");
return 0;
}
| 91ea618e9a409de12988fa11592d9c618d8dae48.cu | // library initialization and finalization
#include <cuda.h>
#include "test.h"
#include "mqx.h"
int test_init_fini()
{
MQX_TPRINT("In test_init_fini");
return 0;
}
|
123d69cb297bc0acbc95e220f645e2fe0c18253a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Copyright (c) 2017-2018 Christoph A. Hartmann, Ulrich Margull and Technische Hochschule Ingolstadt (THI)
//
//Permission is hereby granted, free of charge, to any person obtaining a copy of this
//software and associated documentation files (the "Software"), to deal in the Software
//without restriction, including without limitation the rights to use, copy, modify,
//merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
//permit persons to whom the Software is furnished to do so, subject to the following
//conditions:
//
//The above copyright notice and this permission notice shall be included in all copies
//or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
//INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
//PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
//HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
//OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
//SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/*
* File: GPUart_Impl.cu
* Created by: Christoph Hartmann
* Institute: Technische Hochschule Ingolstadt
* Date: 07.04.2017 */
/********************************************************
* ___ ___ ___ ___ 3 ___ *
* | | | | | |\ /| | | | | | *
* |___ | | |___| | \/ | | | |___ | | *
* | | | |\ | | | | | | | *
* | |___| | \ | | |___| ___| | |___ *
* *
*********************************************************/
/*! @file GPUart_Impl.cu
*
* @brief Implementation of the management component of the GPUart Implemenation layer.
*
* This file concentrates all GPGPU related memory declarations and allocations, memory transfers
* operations, kernel launches, kernel initialisations, and GPU related implementation details.
*
*
* @author Christoph Hartmann
* @date Created on: 7 Apr 2017
*/
/************************************************************************************************/
/* Includes */
/************************************************************************************************/
//include header of Implementation layer
#include "GPUart_Impl.cuh"
#include "GPUart_Impl.h"
//include interfaces to other GPUart layer
#include "GPUart_Impl_Abstr_IF.h"
#include "GPUart_Impl_Sched_IF.h"
//include kernel libraries
#include "GPUart_Sobel.cuh"
#include "GPUart_MatrMul.cuh"
/************************************************************************************************/
/* Compiler Switches */
/************************************************************************************************/
/*! @brief Use zero copy memory (requires integrated GPU)
*
* This MUST be defined so far, since memory transfers over PCIe are currently not implemented completely.
*
* @see http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#zero-copy-memory
* @see https://software.intel.com/en-us/articles/getting-the-most-from-opencl-12-how-to-increase-performance-by-minimizing-buffer-copies-on-intel-processor-graphics
*/
#define S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
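/* Illustrative sketch (not part of the original file): with this switch set,
 * each global buffer is expected to be backed by mapped ("zero copy") host
 * memory, which on HIP is typically obtained along these lines:
 *
 *     void *host_ptr_v;
 *     void *dev_ptr_v;
 *     CUDA_CHECK_RETURN(hipHostMalloc(&host_ptr_v, size_in_bytes,
 *                                     hipHostMallocMapped));
 *     CUDA_CHECK_RETURN(hipHostGetDevicePointer(&dev_ptr_v, host_ptr_v, 0));
 *
 * so that the host_ptr and mem_ptr members of device_global_memory_s alias
 * the same physical memory and gpuI_memcpyHost2Device() can degenerate to a
 * plain memcpy. The actual allocation code lives elsewhere in this file. */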
/************************************************************************************************/
/* Constants */
/************************************************************************************************/
/*!
* @brief The length of the Event Queue, shared between GPU and CPU, used for kernel launch events.
*
* @see perKer_eventQueueCntHost_u32_host
* @see perKer_eventQueueCntDevice_u32_host
* @see perKer_eventQueue_s32_host
*/
#define C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH (10) //Length of event queue
/*!
* @brief Event ID to indicate a termination request for the persistent kernel
*
 * @see perKer_eventQueue_s32_host
 * @see perKer_eventQueue_s32_g
*/
#define C_PERSISTENT_KERNEL_TERMINATE (-1) //Event ID to terminate persistent kernel
/************************************************************************************************/
/* Typedef */
/************************************************************************************************/
/*!
* @brief Typedef for command queues (streams) to abstract GPGPU-API
*
* Command queues are required to improve the concurrency of memory and kernel operatation on the GPU.
*
* @see https://developer.download.nvidia.com/CUDA/training/StreamsAndConcurrencyWebinar.pdf
* @see https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/clCreateCommandQueue.html
*/
typedef hipStream_t command_queue_s;
/*!
* @brief Typedef for a struct which combines global memory pointers, their related host pointers,
* and the size of the memory buffer.
*
*/
typedef struct
{
void ** mem_ptr;
void ** host_ptr;
size_t mem_size;
}device_global_memory_s;
/*!
* @brief Typedef for a struct which combines constant memory pointers and the size of the related memory buffer.
*
*/
typedef struct
{
void ** mem_ptr;
size_t mem_size;
}device_constant_memory_s;
/************************************************************************************************/
/* General Variables */
/************************************************************************************************/
/*!
* @brief The command queue (stream) for memory operations
*/
static command_queue_s memory_command_queue_s;
/*!
* @brief The command queue (stream) for the persistent kernel
*/
static command_queue_s persistent_kernel_command_queue_s;
/*! @var perKer_isRunning_u32_host
* @brief A status flag, which represents the running status of the persistent kernel (host pointer).
* @see perKer_isRunning_u32_g
*/
/*! @var perKer_isRunning_u32_g
* @brief A status flag, which represents the running status of the persistent kernel (device pointer).
* @see perKer_isRunning_u32_host
*/
volatile uint32 *perKer_isRunning_u32_host;
uint32 *perKer_isRunning_u32_g;
/*! @var perKer_eventQueueCntHost_u32_host
* @brief The index of the tail of the event queue for kernel launches written by the host (host pointer).
* @see perKer_eventQueueCntDevice_u32_g
*/
/*! @var perKer_eventQueueCntHost_u32_g
* @brief The index of the tail of the event queue for kernel launches written by the host (device pointer).
* @see perKer_eventQueueCntHost_u32_host
*/
volatile uint32 *perKer_eventQueueCntHost_u32_host;
uint32 *perKer_eventQueueCntHost_u32_g;
/*! @var perKer_eventQueueCntDevice_u32_host
* @brief The index of the head of the event queue for kernel launches written by the device (host pointer).
* @see perKer_eventQueueCntDevice_u32_g
*/
/*! @var perKer_eventQueueCntDevice_u32_g
* @brief The index of the head of the event queue for kernel launches written by the device (device pointer).
* @see perKer_eventQueueCntDevice_u32_host
*/
volatile uint32 *perKer_eventQueueCntDevice_u32_host;
uint32 *perKer_eventQueueCntDevice_u32_g;
/*! @var perKer_eventQueue_s32_host
* @brief The event queue for kernel launch requests, written by the CPU and red by the GPU (host pointer).
*
* To request a kernel launch, write the kernel's ID (#kernel_task_id_e) into the tail of the queue.
* Write #C_PERSISTENT_KERNEL_TERMINATE to terminate the persistent kernel #GPUart_Persistent_Kernel.
* @see perKer_eventQueue_s32_g
*/
/*! @var perKer_eventQueue_s32_g
* @brief The event queue for kernel launch requests, written by the CPU and red by the GPU (device pointer).
*
* To request a kernel launch, write the kernel's ID (#kernel_task_id_e) into the tail of the queue.
* Write #C_PERSISTENT_KERNEL_TERMINATE to terminate the persistent kernel #GPUart_Persistent_Kernel.
* @see perKer_eventQueue_s32_host
*/
volatile sint32 *perKer_eventQueue_s32_host;
sint32 *perKer_eventQueue_s32_g;
/*! @var perKer_kernelTasksRunningStates_u32_host
* @brief A status flag, which represents the running status of each kernel (host pointer).
* @see perKer_kernelTasksRunningStates_u32_g
*/
/*! @var perKer_kernelTasksRunningStates_u32_g
* @brief A status flag, which represents the running status of each kernel (device pointer).
* @see perKer_kernelTasksRunningStates_u32_host
*/
volatile uint32 *perKer_kernelTasksRunningStates_u32_host;
uint32 *perKer_kernelTasksRunningStates_u32_g;
/*!
* @brief The allowed job cost per kernel
*
 * This value is equal to m * µ, whereby m is the number of Streaming Multiprocessors of the GPU
 * #gpuS_nrOfMultiprocessor_u32 and µ is the resource factor #C_GPUS_RESOURCE_FACTOR.
*
* @see kernel_task_id_e
* @see C_GPUS_RESOURCE_FACTOR
* @see gpuS_nrOfMultiprocessor_u32
* @see kernel_job_costs
*/
uint32 max_costs_per_kernel = 0;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_SOBEL1 */
/************************************************************************************************/
sint32 * sob1_matrix_in_s32_g, * sob1_matrix_in_s32_host;
sint32 * sob1_matrix_out_s32_g, * sob1_matrix_out_s32_host;
/* Synchronization variables */
uint32 * sync_SOB1_flags_in_u32_g;
uint32 * sync_SOB1_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_SOB1_flag_g;
volatile sint32 *preempt_SOB1_flag_host;
sint32 * preempt_SOB1_flag_internal_g;
sint32 * preempt_SOB1_sm_g;
volatile sint32 *preempt_SOB1_sm_host;
/* Buffer variables */
uint32 * sob1_buffer_loop_counter_u32_g;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_SOBEL2 */
/************************************************************************************************/
sint32 * sob2_matrix_in_s32_g, * sob2_matrix_in_s32_host;
sint32 * sob2_matrix_out_s32_g, * sob2_matrix_out_s32_host;
/* Synchronization variables */
uint32 * sync_SOB2_flags_in_u32_g;
uint32 * sync_SOB2_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_SOB2_flag_g;
volatile sint32 *preempt_SOB2_flag_host;
sint32 * preempt_SOB2_flag_internal_g;
sint32 * preempt_SOB2_sm_g;
volatile sint32 *preempt_SOB2_sm_host;
/* Buffer variables */
uint32 * sob2_buffer_loop_counter_u32_g;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_MM */
/************************************************************************************************/
float32 * mm_matrix_A_f32_g, * mm_matrix_A_f32_host;
float32 * mm_matrix_B_f32_g, * mm_matrix_B_f32_host;
float32 * mm_matrix_C_f32_g, * mm_matrix_C_f32_host;
/* Synchronization variables */
uint32 * sync_MM_flags_in_u32_g;
uint32 * sync_MM_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_MM_flag_g;
volatile sint32 *preempt_MM_flag_host;
sint32 * preempt_MM_sm_g;
volatile sint32 *preempt_MM_sm_host;
/* Buffer variables */
uint32 * mm_buffer_blockY_g;
uint32 * mm_buffer_blockX_g;
uint32 * mm_buffer_M_g;
/************************************************************************************************/
/* Constant Variable Table */
/************************************************************************************************/
/*!
* @brief The constant memory table
*
* All constant memory buffers which must be written during runtime must be defined here.
* The i'th element represents the i'th constant memory buffer, define by #device_constant_memory_id_e
* in GPUart_Config.h. Each element must defined in the following style: { (void **)& CONSTANT_BUFFER_NAME,
* SIZE_IN_BYTES }.
*
* @see device_constant_memory_id_e
*/
static device_constant_memory_s constant_memory_list_a[E_CM_TOTAL_NR_OF_CONST_MEM_VARIABLES] =
{
//{ (void **)& VARIABLE_NAME, SIZE IN BYTES }
};
/************************************************************************************************/
/* Global Variable Table */
/************************************************************************************************/
/*!
* @brief The global memory table
*
* All global memory buffers which must be written or red during runtime must be defined here.
* The i'th element represents the i'th global memory buffer, define by #device_global_memory_id_e
* in GPUart_Config.h. Each element must defined in the following style: { (void **)&
* GLOBAL_MEMORY_BUFFER_POINTER_DEVICE, GLOBAL_MEMORY_BUFFER_POINTER_HOST, SIZE_IN_BYTES }.
*
* @see device_global_memory_id_e
*/
static device_global_memory_s global_memory_list_a[E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES] =
{
/* Sobel1 */
{ (void **)&sob1_matrix_in_s32_g, (void **)&sob1_matrix_in_s32_host, C_SOB1_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB1_MATRIX_IN
{ (void **)&sob1_matrix_out_s32_g, (void **)&sob1_matrix_out_s32_host, C_SOB1_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB1_MATRIX_OUT
/* Sobel2 */
{ (void **)&sob2_matrix_in_s32_g, (void **)&sob2_matrix_in_s32_host, C_SOB2_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB2_MATRIX_IN
{ (void **)&sob2_matrix_out_s32_g, (void **)&sob2_matrix_out_s32_host, C_SOB2_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB2_MATRIX_OUT
/* MatrMul */
{ (void **)&mm_matrix_A_f32_g, (void **)&mm_matrix_A_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) }, //E_GM_ID_MM_MATRIX_A
{ (void **)&mm_matrix_B_f32_g, (void **)&mm_matrix_B_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) }, //E_GM_ID_MM_MATRIX_B
{ (void **)&mm_matrix_C_f32_g, (void **)&mm_matrix_C_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) } //E_GM_ID_MM_MATRIX_C
};
/************************************************************************************************/
/* Preemption Flag Table */
/************************************************************************************************/
/*!
* @brief The preemption flag table
*
* All preemption flags must be included by this table.
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
* in GPUart_Config.h. Each element must defined in the following style: (volatile sint32**)&
* NAME_OF_PREEMPTION_FLAG_POINTER. If a kernel does not implement a preemption flag, because it
* is non-preemptive, insert a NULL.
*
* @see kernel_task_id_e
*/
static volatile sint32** device_preemption_flags_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
(volatile sint32**) &preempt_SOB1_flag_host, //E_KTID_SOBEL1
(volatile sint32**) &preempt_SOB2_flag_host, //E_KTID_SOBEL2
(volatile sint32**) &preempt_MM_flag_host //E_KTID_MM
};
/************************************************************************************************/
/* Preemption Enabled Parameter Table */
/************************************************************************************************/
/*!
* @brief The preemption enabled table
*
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
* in GPUart_Config.h. Each element must defined in the following style: #C_TRUE if the related kernel
* is preemptive; #C_FALSE if the related kernel is non-preemptive.
*
* @see kernel_task_id_e
*/
const static sint32 preemption_enabled_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
C_TRUE, //E_KTID_SOBEL1
C_TRUE, //E_KTID_SOBEL2
C_TRUE //E_KTID_MM
};
/************************************************************************************************/
/* Kernel State Machine Table */
/************************************************************************************************/
/*!
* @brief The kernel state machine table
*
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
* in GPUart_Config.h. Each element must defined in the following style: &NAME_OF_STATE_MACHINE_POINTER.
* Use NULL if the related kernel is non-preemptive.
*
* @see kernel_task_id_e
*/
static volatile sint32** device_kernel_task_SM_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
&preempt_SOB1_sm_host, //E_KTID_SOBEL1
&preempt_SOB2_sm_host, //E_KTID_SOBEL2
&preempt_MM_sm_host //E_KTID_MM
};
/*!
* @brief The number of state machines table
*
* The i'th element represents the i'th kernel, according to the enum kernel_task_id_e
* in GPUart_Config.h. Each element must defined in the following style: NUMBER_OF_SM_IN_KERNEL.
* If a kernel preempts grid-synchronous then use the value 1u. If a kernel preempts thread-block
* synchronous then use the number of thread blocks of this kernel. If a kernel is non-preemptive
* then use 0u.
*
* @see kernel_task_id_e
*/
static uint32 nb_of_StateMachines_in_kernel_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
1u, //E_KTID_SOBEL1 -> Grid-wide preemption
1u, //E_KTID_SOBEL2 -> Grid-wide preemption
C_MM_NUMBER_OF_BLOCKS //E_KTID_MM -> Thread block-wide preemption
};
/************************************************************************************************/
/* Kernel Cost Table */
/************************************************************************************************/
/*!
* @brief The job cost table
*
* The i'th element represents the i'th kernel, according to the enum kernel_task_id_e
* in GPUart_Config.h. Each element represents the job costs of the related kernel.
 * If a thread block of a kernel requires more than 1/µ of the available registers, shared memory,
 * thread residency slots, or thread block residency slots of a Streaming Multiprocessor,
 * then set the corresponding value to m * µ, whereby µ is the resource factor and m is the GPU's
 * number of Streaming Multiprocessors. If a thread block of a kernel requires less than 1/µ of each
 * resource type, then set the corresponding value to the kernel's number of thread blocks.
*
* @see kernel_task_id_e
* @see C_GPUS_RESOURCE_FACTOR
* @see gpuS_nrOfMultiprocessor_u32
* @see max_costs_per_kernel
*/
static uint8 kernel_job_costs[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
C_SOB1_NUMBER_OF_BLOCKS, //E_KTID_SOBEL1
C_SOB2_NUMBER_OF_BLOCKS, //E_KTID_SOBEL2
C_MM_NUMBER_OF_BLOCKS //E_KTID_MM
};
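/* Worked example (numbers are hypothetical): on a GPU with
 * gpuS_nrOfMultiprocessor_u32 = 2 Streaming Multiprocessors and
 * C_GPUS_RESOURCE_FACTOR = 4, the budget is max_costs_per_kernel = 2 * 4 = 8.
 * A kernel whose thread blocks each use less than 1/4 of every SM resource is
 * charged its plain block count; a kernel with heavier thread blocks is
 * charged the full budget of 8 and therefore never shares the GPU with
 * another kernel. */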
/*!
* @brief The device ID of the used GPU
*
* @see http://docs.nvidia.com/cuda/cuda-c-programming-guide
* @see https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/clGetDeviceIDs.html
*/
static uint8 gpuI_deviceID_u8 = 0;
/************************************************************************************************/
/* Persistent Kernel */
/************************************************************************************************/
/*!
* @brief The persistent kernel (GPU Daemon) which is used to reduce kernel launch latencies.
*
* The kernel arguments must include all global memory buffers of all kernels in this system, since
* this kernel is used to launch GPGPU kernel on demand. The persistent kernel reduces kernel launch
* latencies by bypassing the GPGPU driver stack when launching kernels.
*
* @see Mrozek et al. GPU Daemon: Road to zero cost submission, in Proceedings of the 4th International
* Workshop on OpenCL, Vienna, Austria, 2016 -> https://dl.acm.org/citation.cfm?id=2909450
*/
__global__ void GPUart_Persistent_Kernel
(
//Persistent Kernel Management Data
uint32* __restrict__ perKer_isRunning_u32_g,
uint32* __restrict__ perKer_eventQueueCntDevice_u32_g,
volatile uint32 * __restrict__ perKer_eventQueueCntHost_u32_g,
volatile sint32 * __restrict__ perKer_eventQueue_s32_g,
volatile uint32* __restrict__ perKer_kernelTasksRunningStates_u32_g,
//SOBEL1 Variables
sint32 * __restrict__ sob1_matrix_in_s32_g,
sint32 * __restrict__ sob1_matrix_out_s32_g,
//SOBEL2 Variables
sint32 * __restrict__ sob2_matrix_in_s32_g,
sint32 * __restrict__ sob2_matrix_out_s32_g,
//MM Variables
float32 * __restrict__ mm_matrix_A_f32_g,
float32 * __restrict__ mm_matrix_B_f32_g,
float32 * __restrict__ mm_matrix_C_f32_g,
/* Synchronization variables */
//SOBEL1
uint32 * __restrict__ sync_SOB1_flags_in_u32_g,
uint32 * __restrict__ sync_SOB1_flags_out_u32_g,
//SOBEL2
uint32 * __restrict__ sync_SOB2_flags_in_u32_g,
uint32 * __restrict__ sync_SOB2_flags_out_u32_g,
//MM
uint32 * __restrict__ sync_MM_flags_in_u32_g,
uint32 * __restrict__ sync_MM_flags_out_u32_g,
/* Preemption variables */
//SOB1
sint32 * __restrict__ preempt_SOB1_flag_g,
sint32 * __restrict__ preempt_SOB1_flag_internal_g,
sint32 * __restrict__ preempt_SOB1_sm_g,
//SOB2
sint32 * __restrict__ preempt_SOB2_flag_g,
sint32 * __restrict__ preempt_SOB2_flag_internal_g,
sint32 * __restrict__ preempt_SOB2_sm_g,
//MM
sint32 * __restrict__ preempt_MM_flag_g,
sint32 * __restrict__ preempt_MM_sm_g,
/* Buffer variables */
//SOB1
uint32 * __restrict__ sob1_buffer_loop_counter_u32_g,
//SOB2
uint32 * __restrict__ sob2_buffer_loop_counter_u32_g,
//MM
uint32 * __restrict__ mm_buffer_blockY_g,
uint32 * __restrict__ mm_buffer_blockX_g,
uint32 * __restrict__ mm_buffer_M_g
)
{
hipStream_t stream_kernel_SOB1;
hipStream_t stream_kernel_SOB2;
hipStream_t stream_kernel_MM;
hipStreamCreateWithFlags(&stream_kernel_SOB1, hipStreamNonBlocking);
hipStreamCreateWithFlags(&stream_kernel_SOB2, hipStreamNonBlocking);
hipStreamCreateWithFlags(&stream_kernel_MM, hipStreamNonBlocking);
while(C_TRUE)
{
//Check if host has issued a new event to queue
if(*perKer_eventQueueCntDevice_u32_g != *perKer_eventQueueCntHost_u32_g)
{
//Calculate position of next available event in queue
*perKer_eventQueueCntDevice_u32_g = (*perKer_eventQueueCntDevice_u32_g + 1)
% C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Interpret new event
switch(perKer_eventQueue_s32_g[*perKer_eventQueueCntDevice_u32_g])
{
case C_PERSISTENT_KERNEL_TERMINATE: //Terminate persistent Kernel
*perKer_isRunning_u32_g = C_FALSE;
return;
case E_KTID_SOBEL1:
__syncthreads();
hipLaunchKernelGGL(( Sobel_Kernel), dim3(C_SOB1_NUMBER_OF_BLOCKS), dim3(C_SOB1_LOCAL_WORK_SIZE), 0, stream_kernel_SOB1,
sob1_matrix_in_s32_g,
sob1_matrix_out_s32_g,
C_SOB1_HEIGHT,
C_SOB1_WIDTH,
//Preemption status variables
preempt_SOB1_flag_g,
preempt_SOB1_flag_internal_g,
preempt_SOB1_sm_g,
//Buffer variables
sob1_buffer_loop_counter_u32_g,
//Synchronization variables
sync_SOB1_flags_in_u32_g,
sync_SOB1_flags_out_u32_g,
/* Running status flag */
&perKer_kernelTasksRunningStates_u32_g[E_KTID_SOBEL1]
);
__syncthreads();
break;
case E_KTID_SOBEL2:
__syncthreads();
hipLaunchKernelGGL(( Sobel_Kernel), dim3(C_SOB2_NUMBER_OF_BLOCKS), dim3(C_SOB2_LOCAL_WORK_SIZE), 0, stream_kernel_SOB2,
sob2_matrix_in_s32_g,
sob2_matrix_out_s32_g,
C_SOB2_HEIGHT,
C_SOB2_WIDTH,
//Preemption status variables
preempt_SOB2_flag_g,
preempt_SOB2_flag_internal_g,
preempt_SOB2_sm_g,
//Buffer variables
sob2_buffer_loop_counter_u32_g,
//Synchronization variables
sync_SOB2_flags_in_u32_g,
sync_SOB2_flags_out_u32_g,
/* Running status flag */
&perKer_kernelTasksRunningStates_u32_g[E_KTID_SOBEL2]
);
__syncthreads();
break;
case E_KTID_MM:
__syncthreads();
dim3 dimGridMM(C_MM_NUMBER_OF_BLOCKS_X, C_MM_NUMBER_OF_BLOCKS_Y);
dim3 dimBlockMM(C_MM_LOCAL_WORK_SIZE_X, C_MM_LOCAL_WORK_SIZE_Y);
hipLaunchKernelGGL(( MatrMul_Kernel), dim3(dimGridMM), dim3(dimBlockMM), 0, stream_kernel_MM,
//Functional Data
mm_matrix_A_f32_g,
mm_matrix_B_f32_g,
mm_matrix_C_f32_g,
//Preemption Buffer
mm_buffer_blockY_g,
mm_buffer_blockX_g,
mm_buffer_M_g,
//Preemption Management
preempt_MM_flag_g,
preempt_MM_sm_g,
//Synchronization Flags
sync_MM_flags_in_u32_g,
sync_MM_flags_out_u32_g,
//Running status flag
&perKer_kernelTasksRunningStates_u32_g[E_KTID_MM]
);
__syncthreads();
break;
}
__threadfence_system();
}
}
}
/************************************************************************************************/
/* General function definition */
/************************************************************************************************/
/*! @brief Copy data from host memory to device memory.
*
* Device memory may be shared physical memory or discrete device memory. The device driver
* API call may depend on the type of device memory (global or texture memory).
*
* @param[in] void * variable_p -> The host variable to be copied
* @param[in] device_global_memory_id_e id_p -> The ID of the global memory variable
*
* @return GPUART_SUCCESS if the memory copy operation completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyHost2Device(void * variable_p, device_global_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_global_memory_s device_memory;
if((id_p >= E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES)||(variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
device_memory = global_memory_list_a[id_p];
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
memcpy(*device_memory.host_ptr, variable_p, device_memory.mem_size);
#else
CUDA_CHECK_RETURN(hipMemcpyAsync(*device_memory.mem_ptr, variable_p, device_memory.mem_size,
hipMemcpyHostToDevice, memory_command_queue_s));
#endif
}
return retval;
}
/*! @brief Copy data from device memory to host memory.
*
* Device memory may be shared physical memory or discrete device memory. The device driver
* API call may depend on the type of device memory (global or texture memory).
*
* @param[out] void * variable_p -> The host variable to be written
* @param[in] device_global_memory_id_e id_p -> The ID of the global memory variable
*
* @return GPUART_SUCCESS if the memory copy operation completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyDevice2Host(void * variable_p, device_global_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_global_memory_s device_memory;
if((id_p >= E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES)||(variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
device_memory = global_memory_list_a[id_p];
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
memcpy(variable_p, *device_memory.host_ptr, device_memory.mem_size);
#else
CUDA_CHECK_RETURN(hipMemcpyAsync(variable_p, *device_memory.mem_ptr, device_memory.mem_size,
hipMemcpyDeviceToHost, memory_command_queue_s));
#endif
}
return retval;
}
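/* Illustrative usage sketch (added; not part of the original sources). It shows how a host
 * application could stage the Sobel1 input buffer and fetch the result around a kernel run,
 * assuming the gpuI_* prototypes from GPUart_Impl.h are visible and the buffer IDs exist in
 * device_global_memory_id_e. The local frame arrays are assumptions for illustration. */
#ifdef GPUART_USAGE_EXAMPLES
static GPUart_Retval example_copySobel1Buffers(void)
{
	static sint32 frame_in_s32[C_SOB1_MATRIX_SIZE];   //host-side input image (assumed)
	static sint32 frame_out_s32[C_SOB1_MATRIX_SIZE];  //host-side output image (assumed)
	GPUart_Retval ret;
	//Stage input before requesting the kernel launch
	ret = gpuI_memcpyHost2Device((void *)frame_in_s32, E_GM_ID_SOB1_MATRIX_IN);
	if(ret != GPUART_SUCCESS) { return ret; }
	//... launch E_KTID_SOBEL1 via gpuI_runJob() and await completion ...
	//Fetch the result after the kernel has terminated
	return gpuI_memcpyDevice2Host((void *)frame_out_s32, E_GM_ID_SOB1_MATRIX_OUT);
}
#endif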
/*! @brief Copy data from host memory to constant device memory.
*
* The copy is only possible if persistent GPUart kernel #GPUart_Persistent_Kernel
* is not running, since constant memory data is immutable during kernel execution
* and its value is inherited from parent to child kernel.
*
* @param[in] void * variable_p -> The host variable to be copied
* @param[in] device_constant_memory_id_e id_p -> The ID of the constant memory buffer
*
* @return GPUART_SUCCESS if the memory copy operation completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyConstantMemory(void * variable_p, device_constant_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_constant_memory_s device_memory;
if((id_p >= E_CM_TOTAL_NR_OF_CONST_MEM_VARIABLES) || (variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
if(*perKer_isRunning_u32_host == C_TRUE)
{
retval = GPUART_ERROR_PESISTENT_KERNEL_IS_RUNNING;
}
else
{
device_memory = constant_memory_list_a[id_p];
CUDA_CHECK_RETURN(hipMemcpyToSymbolAsync(*device_memory.mem_ptr, variable_p, device_memory.mem_size, 0,
hipMemcpyHostToDevice, memory_command_queue_s));
CUDA_CHECK_RETURN(hipStreamSynchronize(memory_command_queue_s));
}
}
return retval;
}
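/* Note (added): constant memory writes are only accepted while the persistent kernel is
 * stopped, so gpuI_memcpyConstantMemory() belongs between gpuI_init() and gpuI_start().
 * A minimal, hedged sketch; E_CM_ID_EXAMPLE is hypothetical, since the constant memory
 * table of this system is currently empty. */
#ifdef GPUART_USAGE_EXAMPLES
static void example_writeConstantMemory(void)
{
	uint32 value_u32 = 42u;                                         //host value (assumed)
	gpuI_memcpyConstantMemory((void *)&value_u32, E_CM_ID_EXAMPLE); //hypothetical ID
	//After gpuI_start(), the same call returns GPUART_ERROR_PESISTENT_KERNEL_IS_RUNNING
}
#endif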
/*!
* @brief Request the launch of a GPGPU kernel.
*
* @param kernel_task_id_e task_id_e -> The ID of the kernel to be launched.
*
* @return GPUART_SUCCESS if the kernel launch request was issued successfully.
* @return GPUART_ERROR_NOT_READY if launch request is already active.
*/
GPUart_Retval gpuI_runJob(kernel_task_id_e task_id_e)
{
GPUart_Retval retval = GPUART_SUCCESS;
uint32 eventQueueCntHost_u32_l;
uint32 kernelStatus = ((volatile uint32 *)perKer_kernelTasksRunningStates_u32_host)[task_id_e];
if((kernelStatus == C_KERNEL_SUSPENDED)||
(kernelStatus == C_KERNEL_READY)||
(kernelStatus == C_KERNEL_INIT))
{
perKer_kernelTasksRunningStates_u32_host[task_id_e] = C_KERNEL_ACTIVE;
//Reset preemption flag
if(device_preemption_flags_a[task_id_e] != NULL)
{
// printf("-> Resetting preemption flag for kernel %d", task_id_e);
**device_preemption_flags_a[task_id_e] = C_FALSE;
}
//Reset state machine
if((kernelStatus == C_KERNEL_READY)||(kernelStatus == C_KERNEL_INIT))
{
//Do not reset Kernel SM if kernel has been preempted
if(device_kernel_task_SM_a[task_id_e] != NULL)
{
//**device_kernel_task_SM_a[task_id_e] = 0; --> Old. Now, all SMs of a kernel are set to zero
memset((void *)*device_kernel_task_SM_a[task_id_e], 0, nb_of_StateMachines_in_kernel_a[task_id_e] * sizeof(sint32));
}
}
//Calculate next position in persistent kernel event queue
eventQueueCntHost_u32_l = (perKer_eventQueueCntHost_u32_host[0] + 1)
% C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Set kernel call event
perKer_eventQueue_s32_host[eventQueueCntHost_u32_l] = task_id_e;
//Make new event visible
*perKer_eventQueueCntHost_u32_host = eventQueueCntHost_u32_l;
if((eventQueueCntHost_u32_l == UINT32_MAX )||(eventQueueCntHost_u32_l > C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH))
{
printf("\nFEHLER: Host Counter falsch");
}
}
else
{
retval = GPUART_ERROR_NOT_READY;
}
return retval;
}
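/* Illustrative launch/poll sequence (added; not part of the original sources). The GPUart
 * scheduling layer above this one is expected to enqueue a job and poll its status roughly
 * like this, using only functions defined in this file. */
#ifdef GPUART_USAGE_EXAMPLES
static void example_launchAndAwaitSobel1(void)
{
	gpuI_SetKernelStatusReady(E_KTID_SOBEL1);          //mark job as enqueued
	if(gpuI_runJob(E_KTID_SOBEL1) == GPUART_SUCCESS)   //write launch event into the queue
	{
		while(gpuI_queryKernelIsRunning(E_KTID_SOBEL1) == C_TRUE)
		{
			//busy-wait; the persistent kernel launches and runs Sobel1 meanwhile
		}
	}
}
#endif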
/*************************************************************************************************
Function: gpuI_preemptJob
Description: Issue preemption of a specific kernel task
*/
GPUart_Retval gpuI_preemptJob(kernel_task_id_e task_id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
//Check if kernel task is preemptive
if(preemption_enabled_a[task_id_p] == C_TRUE)
{
//Set preemption flag
**device_preemption_flags_a[task_id_p] = C_TRUE;
}
else
{
//Kernel task is not preemptive -> no operation
retval = GPUART_ERROR_NO_OPERTATION;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelIsRunning
Description: Query kernel running status.
Returns C_TRUE if the kernel task is still running.
Returns C_FALSE if the kernel task is not running.
*/
uint32 gpuI_queryKernelIsRunning(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if((perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_TERMINATED_SUCESSFUL)||
(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_SUSPENDED)||
(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_INIT))
{
//Kernel task is not running -> success
retval = C_FALSE;
}
else
{
//Kernel is still running
retval = C_TRUE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelTerminatedSuccessful
Description: Query whether the kernel task terminated successfully.
Returns C_TRUE if the kernel task terminated successfully.
Returns C_FALSE otherwise.
*/
uint32 gpuI_queryKernelTerminatedSuccessful(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_TERMINATED_SUCESSFUL)
{
//Kernel task is not running -> success
}
else
{
//Kernel is still running
retval = C_FALSE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelPreempted
Description: Query kernel preemption status.
Returns C_TRUE if the kernel task has been preempted (suspended).
Returns C_FALSE otherwise.
*/
uint32 gpuI_queryKernelPreempted(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_SUSPENDED)
{
//Kernel task is not running -> success
}
else
{
//Kernel is still running
retval = C_FALSE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_getJobCosts
Description: Returns the job costs of the corresponding kernel, i.e. the number of thread
blocks it occupies, capped at max_costs_per_kernel.
*/
uint32 gpuI_getJobCosts(kernel_task_id_e task_id_e)
{
uint32 retval = kernel_job_costs[task_id_e];
if(retval > max_costs_per_kernel)
{
retval = max_costs_per_kernel;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_SetKernelStatusReady
Description: Sets the internal status of the corresponding kernel to ready. This function is
called after a new job has been enqueued.
*/
GPUart_Retval gpuI_SetKernelStatusReady(kernel_task_id_e task_id_e)
{
GPUart_Retval retval = GPUART_SUCCESS;
perKer_kernelTasksRunningStates_u32_host[task_id_e] = C_KERNEL_READY;
return retval;
}
/*************************************************************************************************
Function: gpuI_get_NrOfMultiprocessors
Description: Determines the number of Streaming Multiprocessors of the device and scales
it by the resource factor to derive the allowed job costs per kernel.
*/
GPUart_Retval gpuI_get_NrOfMultiprocessors(uint32* nrOfMultprocessors, uint32 resourceFactor)
{
GPUart_Retval retval = GPUART_SUCCESS;
hipDeviceProp_t deviceProp_s;
CUDA_CHECK_RETURN(hipGetDeviceProperties(&deviceProp_s, gpuI_deviceID_u8));
*nrOfMultprocessors = deviceProp_s.multiProcessorCount * resourceFactor;
max_costs_per_kernel = deviceProp_s.multiProcessorCount * resourceFactor;
printf("\nNumber of multiprocessors on the device: %d", *nrOfMultprocessors);
if(*nrOfMultprocessors == 0)
{
retval = GPUART_NO_SUCCESS;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_init()
Description: Initializes GPGPU Runtime, thus it initializes command_queues, device variables
and host variables.
*/
GPUart_Retval gpuI_init(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
int deviceCount_u32 = 0;
CUDA_CHECK_RETURN(hipDeviceReset());
CUDA_CHECK_RETURN(hipGetDeviceCount(&deviceCount_u32));
for (int i = 0; i < deviceCount_u32; i++) {
hipDeviceProp_t prop;
CUDA_CHECK_RETURN(hipGetDeviceProperties(&prop, i));
if(prop.integrated)
{
printf("\nDevice %d with shared physical memory selected", i);
printf("\nMax Block Size: %d", prop.maxThreadsPerBlock);
printf("\nRegs per SM: %d", prop.regsPerMultiprocessor);
printf("\nShared memory per SM: %lu", prop.sharedMemPerBlock);
gpuI_deviceID_u8 = i;
break;
}
}
CUDA_CHECK_RETURN(hipSetDevice(gpuI_deviceID_u8));
/* Initialize device configurations */
CUDA_CHECK_RETURN(hipSetDeviceFlags(hipDeviceMapHost));
CUDA_CHECK_RETURN(hipDeviceSynchronize());
/* Initialize command queues */
CUDA_CHECK_RETURN( hipStreamCreate(&memory_command_queue_s) );
CUDA_CHECK_RETURN( hipStreamCreate(&persistent_kernel_command_queue_s) );
/* Device only variables */
/* Sobel1 ***********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_SOB1_flags_in_u32_g, C_SOB1_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_SOB1_flags_out_u32_g, C_SOB1_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_SOB1_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_SOB1_flag_g, (void *)preempt_SOB1_flag_host, 0) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&preempt_SOB1_flag_internal_g, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_SOB1_sm_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_SOB1_sm_g, (void *)preempt_SOB1_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sob1_buffer_loop_counter_u32_g, C_SOB1_GLOBAL_WORK_SIZE * sizeof(uint32)) );
/* Sobel2 ***********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_SOB2_flags_in_u32_g, C_SOB2_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_SOB2_flags_out_u32_g, C_SOB2_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_SOB2_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_SOB2_flag_g, (void *)preempt_SOB2_flag_host, 0) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&preempt_SOB2_flag_internal_g, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_SOB2_sm_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_SOB2_sm_g, (void *)preempt_SOB2_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sob2_buffer_loop_counter_u32_g, C_SOB2_GLOBAL_WORK_SIZE * sizeof(uint32)) );
/* MatrMul *********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_MM_flags_in_u32_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&sync_MM_flags_out_u32_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_MM_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_MM_flag_g, (void *)preempt_MM_flag_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **)&preempt_MM_sm_host, C_MM_NUMBER_OF_BLOCKS * sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&preempt_MM_sm_g, (void *)preempt_MM_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( hipMalloc( (void **)&mm_buffer_blockY_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&mm_buffer_blockX_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipMalloc( (void **)&mm_buffer_M_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize persistent kernel management variables */
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_isRunning_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_isRunning_u32_g, (void *)perKer_isRunning_u32_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_eventQueueCntDevice_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_eventQueueCntDevice_u32_g, (void *)perKer_eventQueueCntDevice_u32_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_eventQueueCntHost_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_eventQueueCntHost_u32_g, (void *)perKer_eventQueueCntHost_u32_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_eventQueue_s32_host, C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH * sizeof(sint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_eventQueue_s32_g, (void *)perKer_eventQueue_s32_host, 0) );
CUDA_CHECK_RETURN( hipHostMalloc( (void **) &perKer_kernelTasksRunningStates_u32_host, E_KTID_NUMBER_OF_KERNEL_TASKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)&perKer_kernelTasksRunningStates_u32_g, (void *)perKer_kernelTasksRunningStates_u32_host, 0) );
/* Initialize global device application variables */
for(int i = 0; i < E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES; i++ )
{
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
CUDA_CHECK_RETURN( hipHostMalloc( (void **)global_memory_list_a[i].host_ptr, global_memory_list_a[i].mem_size) );
CUDA_CHECK_RETURN( hipHostGetDevicePointer( (void **)global_memory_list_a[i].mem_ptr, (void *) *global_memory_list_a[i].host_ptr, 0) );
#else
CUDA_CHECK_RETURN( hipMalloc((void **)global_memory_list_a[i].mem_ptr, global_memory_list_a[i].mem_size) );
#endif
}
//Initialize status variables
*perKer_isRunning_u32_host = 0;
*perKer_eventQueueCntDevice_u32_host = 0;
*perKer_eventQueueCntHost_u32_host = 0;
for(int i = 0; i < E_KTID_NUMBER_OF_KERNEL_TASKS; i++)
{
perKer_kernelTasksRunningStates_u32_host[i] = C_KERNEL_INIT;
if(device_preemption_flags_a[i] != NULL)
{
**device_preemption_flags_a[i] = C_FALSE;
}
if(device_kernel_task_SM_a[i] != NULL)
{
//Reset all state machines of this kernel (fixes a copy-paste slip that re-cleared the preemption flag instead)
memset((void *)*device_kernel_task_SM_a[i], 0, nb_of_StateMachines_in_kernel_a[i] * sizeof(sint32));
}
}
return retval;
}
//TODO: Once the persistent kernel has been started, a flag should be set that rejects writes to constant variables
/*************************************************************************************************
Function: gpuI_start()
Description: Start execution of persistent GPUart kernel.
*/
GPUart_Retval gpuI_start(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
*perKer_isRunning_u32_host = C_TRUE; //After setting this flag constant memory writes are disabled
CUDA_CHECK_RETURN(hipDeviceSynchronize());
hipLaunchKernelGGL(( GPUart_Persistent_Kernel) , dim3(1), dim3(1), 0, persistent_kernel_command_queue_s,
perKer_isRunning_u32_g,
perKer_eventQueueCntDevice_u32_g,
perKer_eventQueueCntHost_u32_g,
perKer_eventQueue_s32_g,
perKer_kernelTasksRunningStates_u32_g,
//Sobel1 variables
sob1_matrix_in_s32_g,
sob1_matrix_out_s32_g,
//Sobel2 variables
sob2_matrix_in_s32_g,
sob2_matrix_out_s32_g,
//MM variables
mm_matrix_A_f32_g,
mm_matrix_B_f32_g,
mm_matrix_C_f32_g,
//Synchronization variables
sync_SOB1_flags_in_u32_g,
sync_SOB1_flags_out_u32_g,
sync_SOB2_flags_in_u32_g,
sync_SOB2_flags_out_u32_g,
sync_MM_flags_in_u32_g,
sync_MM_flags_out_u32_g,
//Preemption variables
preempt_SOB1_flag_g,
preempt_SOB1_flag_internal_g,
preempt_SOB1_sm_g,
preempt_SOB2_flag_g,
preempt_SOB2_flag_internal_g,
preempt_SOB2_sm_g,
preempt_MM_flag_g,
preempt_MM_sm_g,
//Buffer variables
//SOB1
sob1_buffer_loop_counter_u32_g,
//SOB2
sob2_buffer_loop_counter_u32_g,
//MM
mm_buffer_blockY_g,
mm_buffer_blockX_g,
mm_buffer_M_g
);
printf(".. started");
fflush(stdout);
return retval;
}
/*************************************************************************************************
Function: gpuI_stop()
Description: Stop execution of the persistent GPUart kernel.
*/
GPUart_Retval gpuI_stop(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
uint32 eventQueueCntHost_u32_l;
printf("\nSTOP PERSISTENT KERNEL");
//Calculate next position in persistent kernel event queue
eventQueueCntHost_u32_l = (*perKer_eventQueueCntHost_u32_host + 1) % C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Set termination event
perKer_eventQueue_s32_host[eventQueueCntHost_u32_l] = C_PERSISTENT_KERNEL_TERMINATE;
//Make new event visible
*perKer_eventQueueCntHost_u32_host = eventQueueCntHost_u32_l;
return retval;
}
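/* Illustrative lifecycle (added; not part of the original sources): the expected call order
 * of this layer, waiting until the persistent kernel has consumed the termination event
 * before resources are freed. gpuI_destroy() is assumed visible via GPUart_Impl.h. */
#ifdef GPUART_USAGE_EXAMPLES
static void example_gpuartLifecycle(void)
{
	gpuI_init();                                      //allocate buffers and streams
	gpuI_start();                                     //launch GPUart_Persistent_Kernel
	//... enqueue jobs via gpuI_runJob() ...
	gpuI_stop();                                      //enqueue C_PERSISTENT_KERNEL_TERMINATE
	while(*perKer_isRunning_u32_host == C_TRUE) { }   //wait for the kernel to return
	gpuI_destroy();                                   //free memory, destroy streams
}
#endif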
/*************************************************************************************************
Function: gpuI_destroy()
Description: Terminates GPUart.
Free dedicated or shared device memory. Destroy command_queues.
*/
GPUart_Retval gpuI_destroy(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
CUDA_CHECK_RETURN(hipDeviceSynchronize());
/* Free global device variables */
for(int i = 0; i < (int)E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES; i++ )
{
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
CUDA_CHECK_RETURN( hipHostFree(*global_memory_list_a[i].host_ptr) );
#else
CUDA_CHECK_RETURN( hipFree(*global_memory_list_a[i].mem_ptr) );
#endif
}
/* Destroy device only variables */
/* Destroy persistent kernel variables */
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_isRunning_u32_host));
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_eventQueueCntDevice_u32_host));
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_eventQueueCntHost_u32_host));
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_eventQueue_s32_host));
CUDA_CHECK_RETURN(hipHostFree((void *)perKer_kernelTasksRunningStates_u32_host));
/* Destroy command queues */
CUDA_CHECK_RETURN( hipStreamDestroy(memory_command_queue_s) );
CUDA_CHECK_RETURN( hipStreamDestroy(persistent_kernel_command_queue_s) );
CUDA_CHECK_RETURN( hipDeviceReset());
return retval;
}
| 123d69cb297bc0acbc95e220f645e2fe0c18253a.cu | //Copyright (c) 2017-2018 Christoph A. Hartmann, Ulrich Margull and Technische Hochschule Ingolstadt (THI)
//
//Permission is hereby granted, free of charge, to any person obtaining a copy of this
//software and associated documentation files (the "Software"), to deal in the Software
//without restriction, including without limitation the rights to use, copy, modify,
//merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
//permit persons to whom the Software is furnished to do so, subject to the following
//conditions:
//
//The above copyright notice and this permission notice shall be included in all copies
//or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
//INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
//PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
//HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
//OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
//SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/*
* File: GPUart_Impl.cu
* Created by: Christoph Hartmann
* Institute: Technische Hochschule Ingolstadt
* Date: 07.04.2017 */
/********************************************************
* ___ ___ ___ ___ 3 ___ *
* | | | | | |\ /| | | | | | *
* |___ | | |___| | \/ | | | |___ | | *
* | | | |\ | | | | | | | *
* | |___| | \ | | |___| ___| | |___ *
* *
*********************************************************/
/*! @file GPUart_Impl.cu
*
* @brief Implementation of the management component of the GPUart Implementation layer.
*
* This file concentrates all GPGPU related memory declarations and allocations, memory transfer
* operations, kernel launches, kernel initialisations, and GPU related implementation details.
*
*
* @author Christoph Hartmann
* @date Created on: 7 Apr 2017
*/
/************************************************************************************************/
/* Includes */
/************************************************************************************************/
//include header of Implementation layer
#include "GPUart_Impl.cuh"
#include "GPUart_Impl.h"
//include interfaces to other GPUart layer
#include "GPUart_Impl_Abstr_IF.h"
#include "GPUart_Impl_Sched_IF.h"
//include kernel libraries
#include "GPUart_Sobel.cuh"
#include "GPUart_MatrMul.cuh"
/************************************************************************************************/
/* Compiler Switches */
/************************************************************************************************/
/*! @brief Use zero copy memory (requires integrated GPU)
*
* For now, this MUST be defined, since memory transfers over PCIe are not yet fully implemented.
*
* @see http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#zero-copy-memory
* @see https://software.intel.com/en-us/articles/getting-the-most-from-opencl-12-how-to-increase-performance-by-minimizing-buffer-copies-on-intel-processor-graphics
*/
#define S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
/************************************************************************************************/
/* Constants */
/************************************************************************************************/
/*!
* @brief The length of the Event Queue, shared between GPU and CPU, used for kernel launch events.
*
* @see perKer_eventQueueCntHost_u32_host
* @see perKer_eventQueueCntDevice_u32_host
* @see perKer_eventQueue_s32_host
*/
#define C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH (10) //Length of event queue
/*!
* @brief Event ID to indicate a termination request for the persistent kernel
*
* @see perKer_eventQueueCntHost_u32_host
* @see perKer_eventQueueCntHost_u32_g
*/
#define C_PERSISTENT_KERNEL_TERMINATE (-1) //Event ID to terminate persistent kernel
/************************************************************************************************/
/* Typedef */
/************************************************************************************************/
/*!
* @brief Typedef for command queues (streams) to abstract GPGPU-API
*
* Command queues are required to improve the concurrency of memory and kernel operations on the GPU.
*
* @see https://developer.download.nvidia.com/CUDA/training/StreamsAndConcurrencyWebinar.pdf
* @see https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/clCreateCommandQueue.html
*/
typedef cudaStream_t command_queue_s;
/*!
* @brief Typedef for a struct which combines global memory pointers, their related host pointers,
* and the size of the memory buffer.
*
*/
typedef struct
{
void ** mem_ptr;
void ** host_ptr;
size_t mem_size;
}device_global_memory_s;
/*!
* @brief Typedef for a struct which combines constant memory pointers and the size of the related memory buffer.
*
*/
typedef struct
{
void ** mem_ptr;
size_t mem_size;
}device_constant_memory_s;
/************************************************************************************************/
/* General Variables */
/************************************************************************************************/
/*!
* @brief The command queue (stream) for memory operations
*/
static command_queue_s memory_command_queue_s;
/*!
* @brief The command queue (stream) for the persistent kernel
*/
static command_queue_s persistent_kernel_command_queue_s;
/*! @var perKer_isRunning_u32_host
* @brief A status flag, which represents the running status of the persistent kernel (host pointer).
* @see perKer_isRunning_u32_g
*/
/*! @var perKer_isRunning_u32_g
* @brief A status flag, which represents the running status of the persistent kernel (device pointer).
* @see perKer_isRunning_u32_host
*/
volatile uint32 *perKer_isRunning_u32_host;
uint32 *perKer_isRunning_u32_g;
/*! @var perKer_eventQueueCntHost_u32_host
* @brief The index of the tail of the event queue for kernel launches written by the host (host pointer).
* @see perKer_eventQueueCntDevice_u32_g
*/
/*! @var perKer_eventQueueCntHost_u32_g
* @brief The index of the tail of the event queue for kernel launches written by the host (device pointer).
* @see perKer_eventQueueCntHost_u32_host
*/
volatile uint32 *perKer_eventQueueCntHost_u32_host;
uint32 *perKer_eventQueueCntHost_u32_g;
/*! @var perKer_eventQueueCntDevice_u32_host
* @brief The index of the head of the event queue for kernel launches written by the device (host pointer).
* @see perKer_eventQueueCntDevice_u32_g
*/
/*! @var perKer_eventQueueCntDevice_u32_g
* @brief The index of the head of the event queue for kernel launches written by the device (device pointer).
* @see perKer_eventQueueCntDevice_u32_host
*/
volatile uint32 *perKer_eventQueueCntDevice_u32_host;
uint32 *perKer_eventQueueCntDevice_u32_g;
/*! @var perKer_eventQueue_s32_host
* @brief The event queue for kernel launch requests, written by the CPU and read by the GPU (host pointer).
*
* To request a kernel launch, write the kernel's ID (#kernel_task_id_e) into the tail of the queue.
* Write #C_PERSISTENT_KERNEL_TERMINATE to terminate the persistent kernel #GPUart_Persistent_Kernel.
* @see perKer_eventQueue_s32_g
*/
/*! @var perKer_eventQueue_s32_g
* @brief The event queue for kernel launch requests, written by the CPU and read by the GPU (device pointer).
*
* To request a kernel launch, write the kernel's ID (#kernel_task_id_e) into the tail of the queue.
* Write #C_PERSISTENT_KERNEL_TERMINATE to terminate the persistent kernel #GPUart_Persistent_Kernel.
* @see perKer_eventQueue_s32_host
*/
volatile sint32 *perKer_eventQueue_s32_host;
sint32 *perKer_eventQueue_s32_g;
/*! @var perKer_kernelTasksRunningStates_u32_host
* @brief A status flag, which represents the running status of each kernel (host pointer).
* @see perKer_kernelTasksRunningStates_u32_g
*/
/*! @var perKer_kernelTasksRunningStates_u32_g
* @brief A status flag, which represents the running status of each kernel (device pointer).
* @see perKer_kernelTasksRunningStates_u32_host
*/
volatile uint32 *perKer_kernelTasksRunningStates_u32_host;
uint32 *perKer_kernelTasksRunningStates_u32_g;
/*!
* @brief The allowed job cost per kernel
*
* This value is equal to m * µ, whereby m is the number of Streaming Multiprocessors of the GPU
* #gpuS_nrOfMultiprocessor_u32 and µ is the resource factor #C_GPUS_RESOURCE_FACTOR.
*
* @see kernel_task_id_e
* @see C_GPUS_RESOURCE_FACTOR
* @see gpuS_nrOfMultiprocessor_u32
* @see kernel_job_costs
*/
uint32 max_costs_per_kernel = 0;
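/* Worked example (added): on a device with m == 2 Streaming Multiprocessors and a resource
 * factor of µ == 4 (values assumed for illustration), max_costs_per_kernel becomes
 * 2 * 4 = 8, i.e. the job cost budget that gpuI_getJobCosts() enforces per kernel. */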
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_SOBEL1 */
/************************************************************************************************/
sint32 * sob1_matrix_in_s32_g, * sob1_matrix_in_s32_host;
sint32 * sob1_matrix_out_s32_g, * sob1_matrix_out_s32_host;
/* Synchronization variables */
uint32 * sync_SOB1_flags_in_u32_g;
uint32 * sync_SOB1_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_SOB1_flag_g;
volatile sint32 *preempt_SOB1_flag_host;
sint32 * preempt_SOB1_flag_internal_g;
sint32 * preempt_SOB1_sm_g;
volatile sint32 *preempt_SOB1_sm_host;
/* Buffer variables */
uint32 * sob1_buffer_loop_counter_u32_g;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_SOBEL2 */
/************************************************************************************************/
sint32 * sob2_matrix_in_s32_g, * sob2_matrix_in_s32_host;
sint32 * sob2_matrix_out_s32_g, * sob2_matrix_out_s32_host;
/* Synchronization variables */
uint32 * sync_SOB2_flags_in_u32_g;
uint32 * sync_SOB2_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_SOB2_flag_g;
volatile sint32 *preempt_SOB2_flag_host;
sint32 * preempt_SOB2_flag_internal_g;
sint32 * preempt_SOB2_sm_g;
volatile sint32 *preempt_SOB2_sm_host;
/* Buffer variables */
uint32 * sob2_buffer_loop_counter_u32_g;
/************************************************************************************************/
/* Kernel Task Variables - E_KTID_MM */
/************************************************************************************************/
float32 * mm_matrix_A_f32_g, * mm_matrix_A_f32_host;
float32 * mm_matrix_B_f32_g, * mm_matrix_B_f32_host;
float32 * mm_matrix_C_f32_g, * mm_matrix_C_f32_host;
/* Synchronization variables */
uint32 * sync_MM_flags_in_u32_g;
uint32 * sync_MM_flags_out_u32_g;
/* Preemption related variables*/
sint32 * preempt_MM_flag_g;
volatile sint32 *preempt_MM_flag_host;
sint32 * preempt_MM_sm_g;
volatile sint32 *preempt_MM_sm_host;
/* Buffer variables */
uint32 * mm_buffer_blockY_g;
uint32 * mm_buffer_blockX_g;
uint32 * mm_buffer_M_g;
/************************************************************************************************/
/* Constant Variable Table */
/************************************************************************************************/
/*!
* @brief The constant memory table
*
* All constant memory buffers which must be written during runtime must be defined here.
* The i'th element represents the i'th constant memory buffer, defined by #device_constant_memory_id_e
* in GPUart_Config.h. Each element must be defined in the following style: { (void **)& CONSTANT_BUFFER_NAME,
* SIZE_IN_BYTES }.
*
* @see device_constant_memory_id_e
*/
static device_constant_memory_s constant_memory_list_a[E_CM_TOTAL_NR_OF_CONST_MEM_VARIABLES] =
{
//{ (void **)& VARIABLE_NAME, SIZE IN BYTES }
};
/************************************************************************************************/
/* Global Variable Table */
/************************************************************************************************/
/*!
* @brief The global memory table
*
* All global memory buffers which must be written or read during runtime must be defined here.
* The i'th element represents the i'th global memory buffer, defined by #device_global_memory_id_e
* in GPUart_Config.h. Each element must be defined in the following style: { (void **)&
* GLOBAL_MEMORY_BUFFER_POINTER_DEVICE, GLOBAL_MEMORY_BUFFER_POINTER_HOST, SIZE_IN_BYTES }.
*
* @see device_global_memory_id_e
*/
static device_global_memory_s global_memory_list_a[E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES] =
{
/* Sobel1 */
{ (void **)&sob1_matrix_in_s32_g, (void **)&sob1_matrix_in_s32_host, C_SOB1_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB1_MATRIX_IN
{ (void **)&sob1_matrix_out_s32_g, (void **)&sob1_matrix_out_s32_host, C_SOB1_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB1_MATRIX_OUT
/* Sobel2 */
{ (void **)&sob2_matrix_in_s32_g, (void **)&sob2_matrix_in_s32_host, C_SOB2_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB2_MATRIX_IN
{ (void **)&sob2_matrix_out_s32_g, (void **)&sob2_matrix_out_s32_host, C_SOB2_MATRIX_SIZE * sizeof(sint32) }, //E_GM_ID_SOB2_MATRIX_OUT
/* MatrMul */
{ (void **)&mm_matrix_A_f32_g, (void **)&mm_matrix_A_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) }, //E_GM_ID_MM_MATRIX_A
{ (void **)&mm_matrix_B_f32_g, (void **)&mm_matrix_B_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) }, //E_GM_ID_MM_MATRIX_B
{ (void **)&mm_matrix_C_f32_g, (void **)&mm_matrix_C_f32_host, C_MM_MATRIX_TOTAL_SIZE * sizeof(sint32) } //E_GM_ID_MM_MATRIX_C
};
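/* Added sketch: a further global buffer only needs one entry in the table above plus a
 * matching ID in device_global_memory_id_e. All names below are hypothetical:
 *
 *   { (void **)&new_matrix_f32_g, (void **)&new_matrix_f32_host, C_NEW_MATRIX_SIZE * sizeof(float32) } //E_GM_ID_NEW_MATRIX
 */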
/************************************************************************************************/
/* Preemption Flag Table */
/************************************************************************************************/
/*!
* @brief The preemption flag table
*
* All preemption flags must be included by this table.
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
* in GPUart_Config.h. Each element must be defined in the following style: (volatile sint32**)&
* NAME_OF_PREEMPTION_FLAG_POINTER. If a kernel does not implement a preemption flag, because it
* is non-preemptive, insert a NULL.
*
* @see kernel_task_id_e
*/
static volatile sint32** device_preemption_flags_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
(volatile sint32**) &preempt_SOB1_flag_host, //E_KTID_SOBEL1
(volatile sint32**) &preempt_SOB2_flag_host, //E_KTID_SOBEL2
(volatile sint32**) &preempt_MM_flag_host //E_KTID_MM
};
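/* Added sketch: gpuI_preemptJob() (see the function definitions below) raises a kernel's
 * flag through this table; the running kernel polls it at its preemption points.
 *
 *   **device_preemption_flags_a[E_KTID_MM] = C_TRUE;  //request preemption of MatrMul
 *   //... later, gpuI_queryKernelPreempted(E_KTID_MM) returns C_TRUE once it has suspended
 */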
/************************************************************************************************/
/* Preemption Enabled Parameter Table */
/************************************************************************************************/
/*!
* @brief The preemption enabled table
*
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
* in GPUart_Config.h. Each element must be defined in the following style: #C_TRUE if the related kernel
* is preemptive; #C_FALSE if the related kernel is non-preemptive.
*
* @see kernel_task_id_e
*/
const static sint32 preemption_enabled_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
C_TRUE, //E_KTID_SOBEL1
C_TRUE, //E_KTID_SOBEL2
C_TRUE //E_KTID_MM
};
/************************************************************************************************/
/* Kernel State Machine Table */
/************************************************************************************************/
/*!
* @brief The kernel state machine table
*
* The i'th element represents the i'th kernel, according to the enum #kernel_task_id_e
* in GPUart_Config.h. Each element must be defined in the following style: &NAME_OF_STATE_MACHINE_POINTER.
* Use NULL if the related kernel is non-preemptive.
*
* @see kernel_task_id_e
*/
static volatile sint32** device_kernel_task_SM_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
&preempt_SOB1_sm_host, //E_KTID_SOBEL1
&preempt_SOB2_sm_host, //E_KTID_SOBEL2
&preempt_MM_sm_host //E_KTID_MM
};
/*!
* @brief The number of state machines table
*
* The i'th element represents the i'th kernel, according to the enum kernel_task_id_e
* in GPUart_Config.h. Each element must be defined in the following style: NUMBER_OF_SM_IN_KERNEL.
* If a kernel preempts grid-synchronously then use the value 1u. If a kernel preempts thread-block
* synchronously then use the number of thread blocks of this kernel. If a kernel is non-preemptive
* then use 0u.
*
* @see kernel_task_id_e
*/
static uint32 nb_of_StateMachines_in_kernel_a[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
1u, //E_KTID_SOBEL1 -> Grid-wide preemption
1u, //E_KTID_SOBEL2 -> Grid-wide preemption
C_MM_NUMBER_OF_BLOCKS //E_KTID_MM -> Thread block-wide preemption
};
/************************************************************************************************/
/* Kernel Cost Table */
/************************************************************************************************/
/*!
* @brief The job cost table
*
* The i'th element represents the i'th kernel, according to the enum kernel_task_id_e
* in GPUart_Config.h. Each element represents the job costs of the related kernel.
* If a thread block of a kernel requires more than 1/µ of the available registers, shared memory,
* thread residency slots, or thread block residency slots of an Streaming Multiprocessor,
* then set corresponding value to m * µ, whereby µ is the resource factor and m is the GPU's
* number of Streaming Multiprocessors. If a thread block of a kernel requires less than 1/µ of each
* resource type, then set the corresponding value to the kernels number of thread blocks.
*
* @see kernel_task_id_e
* @see C_GPUS_RESOURCE_FACTOR
* @see gpuS_nrOfMultiprocessor_u32
* @see max_costs_per_kernel
*/
static uint8 kernel_job_costs[E_KTID_NUMBER_OF_KERNEL_TASKS] =
{
C_SOB1_NUMBER_OF_BLOCKS, //E_KTID_SOBEL1
C_SOB2_NUMBER_OF_BLOCKS, //E_KTID_SOBEL2
C_MM_NUMBER_OF_BLOCKS //E_KTID_MM
};
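/* Worked example (added): each MatrMul thread block is assumed to fit within 1/µ of an SM's
 * registers, shared memory, and residency slots, so its job costs simply equal its thread
 * block count C_MM_NUMBER_OF_BLOCKS; a heavier kernel would instead be charged the full
 * m * µ budget (== max_costs_per_kernel). */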
/*!
* @brief The device ID of the used GPU
*
* @see http://docs.nvidia.com/cuda/cuda-c-programming-guide
* @see https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/clGetDeviceIDs.html
*/
static uint8 gpuI_deviceID_u8 = 0;
/************************************************************************************************/
/* Persistent Kernel */
/************************************************************************************************/
/*!
* @brief The persistent kernel (GPU Daemon) which is used to reduce kernel launch latencies.
*
* The kernel arguments must include all global memory buffers of all kernels in this system, since
* this kernel is used to launch GPGPU kernels on demand. The persistent kernel reduces kernel launch
* latencies by bypassing the GPGPU driver stack when launching kernels.
*
* @see Mrozek et al. GPU Daemon: Road to zero cost submission, in Proceedings of the 4th International
* Workshop on OpenCL, Vienna, Austria, 2016 -> https://dl.acm.org/citation.cfm?id=2909450
*/
__global__ void GPUart_Persistent_Kernel
(
//Persistent Kernel Management Data
uint32* __restrict__ perKer_isRunning_u32_g,
uint32* __restrict__ perKer_eventQueueCntDevice_u32_g,
volatile uint32 * __restrict__ perKer_eventQueueCntHost_u32_g,
volatile sint32 * __restrict__ perKer_eventQueue_s32_g,
volatile uint32* __restrict__ perKer_kernelTasksRunningStates_u32_g,
//SOBEL1 Variables
sint32 * __restrict__ sob1_matrix_in_s32_g,
sint32 * __restrict__ sob1_matrix_out_s32_g,
//SOBEL2 Variables
sint32 * __restrict__ sob2_matrix_in_s32_g,
sint32 * __restrict__ sob2_matrix_out_s32_g,
//MM Variables
float32 * __restrict__ mm_matrix_A_f32_g,
float32 * __restrict__ mm_matrix_B_f32_g,
float32 * __restrict__ mm_matrix_C_f32_g,
/* Synchronization variables */
//SOBEL1
uint32 * __restrict__ sync_SOB1_flags_in_u32_g,
uint32 * __restrict__ sync_SOB1_flags_out_u32_g,
//SOBEL2
uint32 * __restrict__ sync_SOB2_flags_in_u32_g,
uint32 * __restrict__ sync_SOB2_flags_out_u32_g,
//MM
uint32 * __restrict__ sync_MM_flags_in_u32_g,
uint32 * __restrict__ sync_MM_flags_out_u32_g,
/* Preemption variables */
//SOB1
sint32 * __restrict__ preempt_SOB1_flag_g,
sint32 * __restrict__ preempt_SOB1_flag_internal_g,
sint32 * __restrict__ preempt_SOB1_sm_g,
//SOB2
sint32 * __restrict__ preempt_SOB2_flag_g,
sint32 * __restrict__ preempt_SOB2_flag_internal_g,
sint32 * __restrict__ preempt_SOB2_sm_g,
//MM
sint32 * __restrict__ preempt_MM_flag_g,
sint32 * __restrict__ preempt_MM_sm_g,
/* Buffer variables */
//SOB1
uint32 * __restrict__ sob1_buffer_loop_counter_u32_g,
//SOB2
uint32 * __restrict__ sob2_buffer_loop_counter_u32_g,
//MM
uint32 * __restrict__ mm_buffer_blockY_g,
uint32 * __restrict__ mm_buffer_blockX_g,
uint32 * __restrict__ mm_buffer_M_g
)
{
cudaStream_t stream_kernel_SOB1;
cudaStream_t stream_kernel_SOB2;
cudaStream_t stream_kernel_MM;
cudaStreamCreateWithFlags(&stream_kernel_SOB1, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&stream_kernel_SOB2, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&stream_kernel_MM, cudaStreamNonBlocking);
while(C_TRUE)
{
//Check if host has issued a new event to queue
if(*perKer_eventQueueCntDevice_u32_g != *perKer_eventQueueCntHost_u32_g)
{
//Calculate position of next available event in queue
*perKer_eventQueueCntDevice_u32_g = (*perKer_eventQueueCntDevice_u32_g + 1)
% C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Interpret new event
switch(perKer_eventQueue_s32_g[*perKer_eventQueueCntDevice_u32_g])
{
case C_PERSISTENT_KERNEL_TERMINATE: //Terminate persistent Kernel
*perKer_isRunning_u32_g = C_FALSE;
return;
case E_KTID_SOBEL1:
__syncthreads();
Sobel_Kernel<<<C_SOB1_NUMBER_OF_BLOCKS, C_SOB1_LOCAL_WORK_SIZE, 0, stream_kernel_SOB1>>>
(
sob1_matrix_in_s32_g,
sob1_matrix_out_s32_g,
C_SOB1_HEIGHT,
C_SOB1_WIDTH,
//Preemption status variables
preempt_SOB1_flag_g,
preempt_SOB1_flag_internal_g,
preempt_SOB1_sm_g,
//Buffer variables
sob1_buffer_loop_counter_u32_g,
//Synchronization variables
sync_SOB1_flags_in_u32_g,
sync_SOB1_flags_out_u32_g,
/* Running status flag */
&perKer_kernelTasksRunningStates_u32_g[E_KTID_SOBEL1]
);
__syncthreads();
break;
case E_KTID_SOBEL2:
__syncthreads();
Sobel_Kernel<<<C_SOB2_NUMBER_OF_BLOCKS, C_SOB2_LOCAL_WORK_SIZE, 0, stream_kernel_SOB2>>>
(
sob2_matrix_in_s32_g,
sob2_matrix_out_s32_g,
C_SOB2_HEIGHT,
C_SOB2_WIDTH,
//Preemption status variables
preempt_SOB2_flag_g,
preempt_SOB2_flag_internal_g,
preempt_SOB2_sm_g,
//Buffer variables
sob2_buffer_loop_counter_u32_g,
//Synchronization variables
sync_SOB2_flags_in_u32_g,
sync_SOB2_flags_out_u32_g,
/* Running status flag */
&perKer_kernelTasksRunningStates_u32_g[E_KTID_SOBEL2]
);
__syncthreads();
break;
case E_KTID_MM:
__syncthreads();
dim3 dimGridMM(C_MM_NUMBER_OF_BLOCKS_X, C_MM_NUMBER_OF_BLOCKS_Y);
dim3 dimBlockMM(C_MM_LOCAL_WORK_SIZE_X, C_MM_LOCAL_WORK_SIZE_Y);
MatrMul_Kernel<<<dimGridMM, dimBlockMM, 0, stream_kernel_MM>>>
(
//Functional Data
mm_matrix_A_f32_g,
mm_matrix_B_f32_g,
mm_matrix_C_f32_g,
//Preemption Buffer
mm_buffer_blockY_g,
mm_buffer_blockX_g,
mm_buffer_M_g,
//Preemption Management
preempt_MM_flag_g,
preempt_MM_sm_g,
//Synchronization Flags
sync_MM_flags_in_u32_g,
sync_MM_flags_out_u32_g,
//Running status flag
&perKer_kernelTasksRunningStates_u32_g[E_KTID_MM]
);
__syncthreads();
break;
}
__threadfence_system();
}
}
}
/************************************************************************************************/
/* General function definition */
/************************************************************************************************/
/*! @brief Copy data from host memory to device memory.
*
* Device memory may be shared physical memory or discrete device memory. The device driver
* API call may depend on the type of device memory (global or texture memory).
*
* @param[in] void * variable_p -> The host variable to be copied
* @param[in] device_global_memory_id_e id_p -> The ID of the global memory variable
*
* @return GPUART_SUCCESS if the memory copy operation completed successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyHost2Device(void * variable_p, device_global_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_global_memory_s device_memory;
if((id_p >= E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES)||(variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
device_memory = global_memory_list_a[id_p];
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
memcpy(*device_memory.host_ptr, variable_p, device_memory.mem_size);
#else
CUDA_CHECK_RETURN(cudaMemcpyAsync(*device_memory.mem_ptr, variable_p, device_memory.mem_size,
cudaMemcpyHostToDevice, memory_command_queue_s));
#endif
}
return retval;
}
/*! @brief Copy data from device memory to host memory.
*
* Device memory may be shared physical memory or discrete device memory. The device driver
* API call may depend on the type of device memory (global or texture memory).
*
* @param[out] void * variable_p -> The host variable to be written
* @param[in] device_global_memory_id_e id_p -> The ID of the global memory variable
*
* @return GPUART_SUCCESS if memory copy operation has been successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyDevice2Host(void * variable_p, device_global_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_global_memory_s device_memory;
if((id_p >= E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES)||(variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
device_memory = global_memory_list_a[id_p];
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
memcpy(variable_p, *device_memory.host_ptr, device_memory.mem_size);
#else
CUDA_CHECK_RETURN(cudaMemcpyAsync(variable_p, *device_memory.mem_ptr, device_memory.mem_size,
cudaMemcpyDeviceToHost, memory_command_queue_s));
#endif
}
return retval;
}
/*! @brief Copy data from host memory to constant device memory.
*
* The copy is only possible if persistent GPUart kernel #GPUart_Persistent_Kernel
* is not running, since constant memory data is immutable during kernel execution
* and its value is inherited from parent to child kernel.
*
* @param[in] void * variable_p -> The host variable to be copied
* @param[in] device_constant_memory_id_e id_p -> The ID of the constant memory buffer
*
* @return GPUART_SUCCESS if memory copy operation has been successfully.
* @return GPUART_ERROR_INVALID_ARGUMENT if id_p is an invalid ID.
*/
GPUart_Retval gpuI_memcpyConstantMemory(void * variable_p, device_constant_memory_id_e id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
device_constant_memory_s device_memory;
if((id_p >= E_CM_TOTAL_NR_OF_CONST_MEM_VARIABLES) || (variable_p == NULL))
{
retval = GPUART_ERROR_INVALID_ARGUMENT;
}
else
{
if(*perKer_isRunning_u32_host == C_TRUE)
{
retval = GPUART_ERROR_PESISTENT_KERNEL_IS_RUNNING;
}
else
{
device_memory = constant_memory_list_a[id_p];
CUDA_CHECK_RETURN(cudaMemcpyToSymbolAsync(*device_memory.mem_ptr, variable_p, device_memory.mem_size, 0,
cudaMemcpyHostToDevice, memory_command_queue_s));
CUDA_CHECK_RETURN(cudaStreamSynchronize(memory_command_queue_s));
}
}
return retval;
}
/*!
* @brief Request the launch of a GPGPU kernel.
*
* @param kernel_task_id_e task_id_e -> The ID of the kernel to be launched.
*
* @return GPUART_SUCCESS if the kernel launch request was issued successfully.
* @return GPUART_ERROR_NOT_READY if launch request is already active.
*/
GPUart_Retval gpuI_runJob(kernel_task_id_e task_id_e)
{
GPUart_Retval retval = GPUART_SUCCESS;
uint32 eventQueueCntHost_u32_l;
uint32 kernelStatus = ((volatile uint32 *)perKer_kernelTasksRunningStates_u32_host)[task_id_e];
if((kernelStatus == C_KERNEL_SUSPENDED)||
(kernelStatus == C_KERNEL_READY)||
(kernelStatus == C_KERNEL_INIT))
{
perKer_kernelTasksRunningStates_u32_host[task_id_e] = C_KERNEL_ACTIVE;
//Reset preemption flag
if(device_preemption_flags_a[task_id_e] != NULL)
{
// printf("-> Resetting preemption flag for kernel %d", task_id_e);
**device_preemption_flags_a[task_id_e] = C_FALSE;
}
//Reset state machine
if((kernelStatus == C_KERNEL_READY)||(kernelStatus == C_KERNEL_INIT))
{
//Do not reset Kernel SM if kernel has been preempted
if(device_kernel_task_SM_a[task_id_e] != NULL)
{
//**device_kernel_task_SM_a[task_id_e] = 0; --> Old. Now, all SMs of a kernel are set to zero
memset((void *)*device_kernel_task_SM_a[task_id_e], 0, nb_of_StateMachines_in_kernel_a[task_id_e] * sizeof(sint32));
}
}
//Calculate next position in persistent kernel event queue
eventQueueCntHost_u32_l = (perKer_eventQueueCntHost_u32_host[0] + 1)
% C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Set kernel call event
perKer_eventQueue_s32_host[eventQueueCntHost_u32_l] = task_id_e;
//Make new event visible
*perKer_eventQueueCntHost_u32_host = eventQueueCntHost_u32_l;
if((eventQueueCntHost_u32_l == UINT32_MAX )||(eventQueueCntHost_u32_l > C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH))
{
printf("\nFEHLER: Host Counter falsch");
}
}
else
{
retval = GPUART_ERROR_NOT_READY;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_preemptJob
Description: Issue preemption of a specific kernel task
*/
GPUart_Retval gpuI_preemptJob(kernel_task_id_e task_id_p)
{
GPUart_Retval retval = GPUART_SUCCESS;
//Check if kernel task is preemptive
if(preemption_enabled_a[task_id_p] == C_TRUE)
{
//Set preemption flag
**device_preemption_flags_a[task_id_p] = C_TRUE;
}
else
{
//Kernel task is not preemptive -> no operation
retval = GPUART_ERROR_NO_OPERTATION;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelIsRunning
Description: Query kernel running status.
Returns C_TRUE if the kernel task is still running.
Returns C_FALSE if the kernel task is not running.
*/
uint32 gpuI_queryKernelIsRunning(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if((perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_TERMINATED_SUCESSFUL)||
(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_SUSPENDED)||
(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_INIT))
{
//Kernel task is not running -> success
retval = C_FALSE;
}
else
{
//Kernel is still running
retval = C_TRUE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelTerminatedSuccessful
Description: Query whether the kernel task terminated successfully.
Returns C_TRUE if the kernel task terminated successfully.
Returns C_FALSE otherwise.
*/
uint32 gpuI_queryKernelTerminatedSuccessful(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_TERMINATED_SUCESSFUL)
{
//Kernel task is not running -> success
}
else
{
//Kernel is still running
retval = C_FALSE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_queryKernelPreempted
Description: Query kernel preemption status.
Returns C_TRUE if the kernel task has been preempted (suspended).
Returns C_FALSE otherwise.
*/
uint32 gpuI_queryKernelPreempted(kernel_task_id_e task_id_e)
{
uint32 retval = C_TRUE;
//Query stream whether there is a running operation
if(perKer_kernelTasksRunningStates_u32_host[task_id_e] == C_KERNEL_SUSPENDED)
{
//Kernel task is not running -> success
}
else
{
//Kernel is still running
retval = C_FALSE;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_getJobCosts
Description: Returns the job costs of the corresponding kernel, i.e. the number of thread
blocks it occupies, capped at max_costs_per_kernel.
*/
uint32 gpuI_getJobCosts(kernel_task_id_e task_id_e)
{
uint32 retval = kernel_job_costs[task_id_e];
if(retval > max_costs_per_kernel)
{
retval = max_costs_per_kernel;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_SetKernelStatusReady
Description: Sets the internal status of the corresponding kernel to ready. This function is
called after a new job has been enqueued.
*/
GPUart_Retval gpuI_SetKernelStatusReady(kernel_task_id_e task_id_e)
{
GPUart_Retval retval = GPUART_SUCCESS;
perKer_kernelTasksRunningStates_u32_host[task_id_e] = C_KERNEL_READY;
return retval;
}
/*************************************************************************************************
Function: gpuI_get_NrOfMultiprocessors
Description: Determines the number of Streaming Multiprocessors of the device and scales
it by the resource factor to derive the allowed job costs per kernel.
*/
GPUart_Retval gpuI_get_NrOfMultiprocessors(uint32* nrOfMultprocessors, uint32 resourceFactor)
{
GPUart_Retval retval = GPUART_SUCCESS;
cudaDeviceProp deviceProp_s;
CUDA_CHECK_RETURN(cudaGetDeviceProperties(&deviceProp_s, gpuI_deviceID_u8));
*nrOfMultprocessors = deviceProp_s.multiProcessorCount * resourceFactor;
max_costs_per_kernel = deviceProp_s.multiProcessorCount * resourceFactor;
printf("\nNumber of multiprocessors on the device: %d", *nrOfMultprocessors);
if(*nrOfMultprocessors == 0)
{
retval = GPUART_NO_SUCCESS;
}
return retval;
}
/*************************************************************************************************
Function: gpuI_init()
Description: Initializes GPGPU Runtime, thus it initializes command_queues, device variables
and host variables.
*/
GPUart_Retval gpuI_init(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
int deviceCount_u32 = 0;
CUDA_CHECK_RETURN(cudaDeviceReset()); //cudaThreadExit() is deprecated; cudaDeviceReset() is its replacement
CUDA_CHECK_RETURN(cudaGetDeviceCount(&deviceCount_u32));
for (int i = 0; i < deviceCount_u32; i++) {
cudaDeviceProp prop;
CUDA_CHECK_RETURN(cudaGetDeviceProperties(&prop, i));
if(prop.integrated)
{
printf("\nDevice %d with shared physical memory selected", i);
printf("\nMax Block Size: %d", prop.maxThreadsPerBlock);
printf("\nRegs per SM: %d", prop.regsPerMultiprocessor);
printf("\nShared memory per SM: %lu", prop.sharedMemPerBlock);
gpuI_deviceID_u8 = i;
break;
}
}
CUDA_CHECK_RETURN(cudaSetDevice(gpuI_deviceID_u8));
/* Initialize device configurations */
CUDA_CHECK_RETURN(cudaSetDeviceFlags(cudaDeviceMapHost));
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
/* Initialize command queues */
CUDA_CHECK_RETURN( cudaStreamCreate(&memory_command_queue_s) );
CUDA_CHECK_RETURN( cudaStreamCreate(&persistent_kernel_command_queue_s) );
/* Device only variables */
/* Sobel1 ***********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_SOB1_flags_in_u32_g, C_SOB1_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_SOB1_flags_out_u32_g, C_SOB1_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_SOB1_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_SOB1_flag_g, (void *)preempt_SOB1_flag_host, 0) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&preempt_SOB1_flag_internal_g, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_SOB1_sm_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_SOB1_sm_g, (void *)preempt_SOB1_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sob1_buffer_loop_counter_u32_g, C_SOB1_GLOBAL_WORK_SIZE * sizeof(uint32)) );
/* Sobel2 ***********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_SOB2_flags_in_u32_g, C_SOB2_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_SOB2_flags_out_u32_g, C_SOB2_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_SOB2_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_SOB2_flag_g, (void *)preempt_SOB2_flag_host, 0) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&preempt_SOB2_flag_internal_g, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_SOB2_sm_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_SOB2_sm_g, (void *)preempt_SOB2_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sob2_buffer_loop_counter_u32_g, C_SOB2_GLOBAL_WORK_SIZE * sizeof(uint32)) );
/* MatrMul *********************************************************************************************************/
/* Initialize synchronization flags*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_MM_flags_in_u32_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&sync_MM_flags_out_u32_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize preemption management variables*/
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_MM_flag_host, sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_MM_flag_g, (void *)preempt_MM_flag_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **)&preempt_MM_sm_host, C_MM_NUMBER_OF_BLOCKS * sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&preempt_MM_sm_g, (void *)preempt_MM_sm_host, 0) );
/* Initialize preemption buffer*/
CUDA_CHECK_RETURN( cudaMalloc( (void **)&mm_buffer_blockY_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&mm_buffer_blockX_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaMalloc( (void **)&mm_buffer_M_g, C_MM_NUMBER_OF_BLOCKS * sizeof(uint32)) );
/* Initialize persistent kernel management variables */
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_isRunning_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_isRunning_u32_g, (void *)perKer_isRunning_u32_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_eventQueueCntDevice_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_eventQueueCntDevice_u32_g, (void *)perKer_eventQueueCntDevice_u32_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_eventQueueCntHost_u32_host, sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_eventQueueCntHost_u32_g, (void *)perKer_eventQueueCntHost_u32_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_eventQueue_s32_host, C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH * sizeof(sint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_eventQueue_s32_g, (void *)perKer_eventQueue_s32_host, 0) );
CUDA_CHECK_RETURN( cudaMallocHost( (void **) &perKer_kernelTasksRunningStates_u32_host, E_KTID_NUMBER_OF_KERNEL_TASKS * sizeof(uint32)) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)&perKer_kernelTasksRunningStates_u32_g, (void *)perKer_kernelTasksRunningStates_u32_host, 0) );
/* Initialize global device application variables */
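/* Depending on the build flag, the application buffers are either allocated as
   page-locked host memory that is mapped into the device address space (zero-copy),
   or as dedicated device memory that is only reachable through explicit copies. */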
for(int i = 0; i < E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES; i++ )
{
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
CUDA_CHECK_RETURN( cudaMallocHost( (void **)global_memory_list_a[i].host_ptr, global_memory_list_a[i].mem_size) );
CUDA_CHECK_RETURN( cudaHostGetDevicePointer( (void **)global_memory_list_a[i].mem_ptr, (void *) *global_memory_list_a[i].host_ptr, 0) );
#else
CUDA_CHECK_RETURN( cudaMalloc((void **)global_memory_list_a[i].mem_ptr, global_memory_list_a[i].mem_size) );
#endif
}
//Initialize status variables
*perKer_isRunning_u32_host = 0;
*perKer_eventQueueCntDevice_u32_host = 0;
*perKer_eventQueueCntHost_u32_host = 0;
for(int i = 0; i < E_KTID_NUMBER_OF_KERNEL_TASKS; i++)
{
perKer_kernelTasksRunningStates_u32_host[i] = C_KERNEL_INIT;
if(device_preemption_flags_a[i] != NULL)
{
**device_preemption_flags_a[i] = C_FALSE;
}
if(device_kernel_task_SM_a[i] != NULL)
{
**device_kernel_task_SM_a[i] = C_FALSE;
}
}
return retval;
}
//TODO: Once the persistent kernel has been started, a flag should be set that rejects writes to constant variables
/*************************************************************************************************
Function: gpuI_start()
Description: Start execution of persistent GPUart kernel.
*/
GPUart_Retval gpuI_start(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
*perKer_isRunning_u32_host = C_TRUE; //After setting this flag constant memory writes are disabled
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
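//The persistent kernel is launched as a single thread block with one thread: it serves
//as a scheduler loop that consumes events from the mapped (zero-copy) event queue and
//dispatches the actual work kernels on the device.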
GPUart_Persistent_Kernel <<<1, 1, 0, persistent_kernel_command_queue_s>>>
(
perKer_isRunning_u32_g,
perKer_eventQueueCntDevice_u32_g,
perKer_eventQueueCntHost_u32_g,
perKer_eventQueue_s32_g,
perKer_kernelTasksRunningStates_u32_g,
//Sobel1 variables
sob1_matrix_in_s32_g,
sob1_matrix_out_s32_g,
//Sobel2 variables
sob2_matrix_in_s32_g,
sob2_matrix_out_s32_g,
//MM variables
mm_matrix_A_f32_g,
mm_matrix_B_f32_g,
mm_matrix_C_f32_g,
//Synchronization variables
sync_SOB1_flags_in_u32_g,
sync_SOB1_flags_out_u32_g,
sync_SOB2_flags_in_u32_g,
sync_SOB2_flags_out_u32_g,
sync_MM_flags_in_u32_g,
sync_MM_flags_out_u32_g,
//Preemption variables
preempt_SOB1_flag_g,
preempt_SOB1_flag_internal_g,
preempt_SOB1_sm_g,
preempt_SOB2_flag_g,
preempt_SOB2_flag_internal_g,
preempt_SOB2_sm_g,
preempt_MM_flag_g,
preempt_MM_sm_g,
//Buffer variables
//SOB1
sob1_buffer_loop_counter_u32_g,
//SOB2
sob2_buffer_loop_counter_u32_g,
//MM
mm_buffer_blockY_g,
mm_buffer_blockX_g,
mm_buffer_M_g
);
printf(".. started");
fflush(stdout);
return retval;
}
/*************************************************************************************************
Function: gpuI_stop()
Description: Stop execution of persistent GPUart kernel.
*/
GPUart_Retval gpuI_stop(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
uint32 eventQueueCntHost_u32_l;
printf("\nSTOP PERSISTENT KERNEL");
//Calculate next position in persistent kernel event queue
eventQueueCntHost_u32_l = (*perKer_eventQueueCntHost_u32_host + 1) % C_PERSISTENT_KERNEL_EVENT_QUEUE_LENGTH;
//Set termination event
perKer_eventQueue_s32_host[eventQueueCntHost_u32_l] = C_PERSISTENT_KERNEL_TERMINATE;
//Make new event visible
*perKer_eventQueueCntHost_u32_host = eventQueueCntHost_u32_l;
return retval;
}
/*************************************************************************************************
Function: gpuI_destroy()
Description: Terminates GPUart.
Free dedicated or shared device memory. Destroy command_queues.
*/
GPUart_Retval gpuI_destroy(void)
{
GPUart_Retval retval = GPUART_SUCCESS;
CUDA_CHECK_RETURN(cudaDeviceSynchronize());
/* Free global device variables */
for(int i = 0; i < (int)E_GM_TOTAL_NR_OF_GLOB_MEM_VARIABLES; i++ )
{
#ifdef S_USE_ZERO_COPY_FOR_GLOBAL_APPLICATION_MEMORY
CUDA_CHECK_RETURN( cudaFreeHost(*global_memory_list_a[i].host_ptr) );
#else
CUDA_CHECK_RETURN( cudaFree(*global_memory_list_a[i].mem_ptr) );
#endif
}
/* Destroy device only variables */
/* Destroy persistent kernel variables */
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_isRunning_u32_host));
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_eventQueueCntDevice_u32_host));
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_eventQueueCntHost_u32_host));
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_eventQueue_s32_host));
CUDA_CHECK_RETURN(cudaFreeHost((void *)perKer_kernelTasksRunningStates_u32_host));
/* Destroy command queues */
CUDA_CHECK_RETURN( cudaStreamDestroy(memory_command_queue_s) );
CUDA_CHECK_RETURN( cudaStreamDestroy(persistent_kernel_command_queue_s) );
CUDA_CHECK_RETURN( cudaDeviceReset());
return retval;
}
|
9cfaa0c00c9393dc412a02ce7f337a2917aced4e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// relu_{gpu, cpu}
// prelu_{gpu, cpu}
// relu_inplace_{gpu, cpu}
// prelu_inplace_{gpu, cpu}
// --------------------------------------------------------------------------
// ReLU transform bottom -> top
// top[i] = bottom[i] if bottom[i] > 0, 0 otherwise
#ifdef GPU
__global__
void relu_gpu(const real* const bottom, real* const top,
const long int data_size)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
top[index] = (bottom[index] > 0) ? bottom[index] : 0;
}
}
#else
void relu_cpu(const real* const bottom, real* const top,
const long int data_size)
{
for (long int index = 0; index < data_size; ++index) {
top[index] = (bottom[index] > 0) ? bottom[index] : 0;
}
}
#endif
// soft ReLU transform bottom -> top
// top[i] = bottom[i] if bottom[i] > 0, slope * bottom[i] otherwise
#ifdef GPU
__global__
void prelu_gpu(const real* const bottom, real* const top,
const long int data_size, const real negative_slope)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
top[index] = (bottom[index] > 0) ? bottom[index] :
bottom[index] * negative_slope;
}
}
#else
void prelu_cpu(const real* const bottom, real* const top,
const long int data_size, const real negative_slope)
{
for (long int index = 0; index < data_size; ++index) {
top[index] = (bottom[index] > 0) ? bottom[index] :
bottom[index] * negative_slope;
}
}
#endif
// in-place ReLU transform
#ifdef GPU
__global__
void relu_inplace_gpu(real* const bottom, const long int data_size)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
bottom[index] = (bottom[index] > 0) ? bottom[index] : 0;
}
}
#else
void relu_inplace_cpu(real* const bottom, const long int data_size)
{
for (long int index = 0; index < data_size; ++index) {
bottom[index] = (bottom[index] > 0) ? bottom[index] : 0;
}
}
#endif
// in-place soft ReLU transform
#ifdef GPU
__global__
void prelu_inplace_gpu(real* const bottom, const long int data_size,
const real negative_slope)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
bottom[index] = (bottom[index] > 0) ? bottom[index] :
bottom[index] * negative_slope;
}
}
#else
void prelu_inplace_cpu(real* const bottom, const long int data_size,
const real negative_slope)
{
for (long int index = 0; index < data_size; ++index) {
bottom[index] = (bottom[index] > 0) ? bottom[index] :
bottom[index] * negative_slope;
}
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// relu_forward
// relu_forward_inplace
// --------------------------------------------------------------------------
// (soft-)ReLU transform: bottom -> top
// data size: total number of nodes (N * C * H * W or something)
// if option->negative_slope = 0, perform ReLU
// > 0, perform soft ReLU
void relu_forward(const Tensor* const bottom,
Tensor* const top,
const LayerOption* const option)
{
const long int data_size = flatten_size(bottom);
// perform (soft-)ReLU transform
// if option->negative_slope = 0, perform ReLU
// > 0, perform soft ReLU
#ifdef GPU
{
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(data_size, threads_per_block);
if (option->negative_slope == 0) {
hipLaunchKernelGGL(( relu_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom->data, top->data, data_size);
}
else {
hipLaunchKernelGGL(( prelu_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom->data, top->data, data_size, option->negative_slope);
}
}
#else
{
if (option->negative_slope == 0) {
relu_cpu(
bottom->data, top->data, data_size);
}
else {
prelu_cpu(
bottom->data, top->data, data_size, option->negative_slope);
}
}
#endif
// set top shape (= bottom shape)
{
top->ndim = bottom->ndim;
top->num_items = bottom->num_items;
for (int n = 0; n < bottom->num_items; ++n) {
for (int i = 0; i < bottom->ndim; ++i) {
top->shape[n][i] = bottom->shape[n][i];
}
}
}
}
// in-place (soft-)ReLU transform: bottom -> bottom
// data size: total number of nodes (N * C * H * W or something)
// if option->negative_slope = 0, perform ReLU
// > 0, perform soft ReLU
void relu_forward_inplace(Tensor* const bottom,
const LayerOption* const option)
{
const long int data_size = flatten_size(bottom);
// perform (soft-)ReLU transform
// if option->negative_slope = 0, perform ReLU
// > 0, perform soft ReLU
#ifdef GPU
{
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(data_size, threads_per_block);
if (option->negative_slope == 0) {
hipLaunchKernelGGL(( relu_inplace_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom->data, data_size);
}
else {
hipLaunchKernelGGL(( prelu_inplace_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom->data, data_size, option->negative_slope);
}
}
#else
{
if (option->negative_slope == 0) {
relu_inplace_cpu(
bottom->data, data_size);
}
else {
prelu_inplace_cpu(
bottom->data, data_size, option->negative_slope);
}
}
#endif
}
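// Minimal usage sketch (hypothetical caller; assumes a Tensor pair and a LayerOption
// initialized as in the TEST block below -- not part of the original API surface):
//   LayerOption opt;
//   opt.negative_slope = 0.0f;           // 0 -> plain ReLU
//   relu_forward(&bottom, &top, &opt);   // out-of-place; also copies the shape
//   opt.negative_slope = 0.1f;           // > 0 -> soft ReLU
//   relu_forward_inplace(&top, &opt);    // overwrites its input in place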
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
void relu_shape(const Tensor* const bottom,
Tensor* const top)
{
// top shape = bottom shape
top->ndim = bottom->ndim;
top->num_items = bottom->num_items;
for (int n = 0; n < bottom->num_items; ++n) {
for (int i = 0; i < bottom->ndim; ++i) {
top->shape[n][i] = bottom->shape[n][i];
}
}
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_relu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
relu_forward(layer->p_bottoms[0], &layer->tops[0], &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void forward_inplace_relu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
relu_forward_inplace(&layer->tops[0], &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void shape_relu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
relu_shape(layer->p_bottoms[0], &layer->tops[0]);
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
int main(int argc, char* argv[])
{
// variable declaration & memory allocation
Tensor X, Y_relu, Y_prelu;
real *X_data = NULL, *relu_data = NULL, *prelu_data = NULL;
LayerOption option;
// set option
{
option.negative_slope = 0;
}
// load data
{
int ndim;
int shape[g_max_ndim];
int total_size;
X_data = load_data("../data/temp/conv_top0.bin",
&ndim, shape, NULL);
X.num_items = shape[0];
X.ndim = ndim - 1;
total_size = 0;
for (int n = 0; n < X.num_items; ++n) {
int size_n = 1;
for (int i = 0; i < X.ndim; ++i) {
X.shape[n][i] = shape[i + 1];
size_n *= shape[i + 1];
}
X.start[n] = total_size;
total_size += size_n;
}
relu_shape(&X, &Y_relu);
relu_shape(&X, &Y_prelu);
relu_data = (real*)malloc(flatten_size(&Y_relu) * sizeof(real));
prelu_data = (real*)malloc(flatten_size(&Y_prelu) * sizeof(real));
}
// CUDA initialization
#ifdef GPU
{
printf("set device\n");
hipSetDevice(0);
}
#endif
// bind loaded data to corresponding tensors
#ifdef GPU
{
const long int X_size = flatten_size(&X);
const long int relu_size = flatten_size(&Y_relu);
const long int prelu_size = flatten_size(&Y_prelu);
printf("gpu malloc\n");
hipMalloc(&X.data, X_size * sizeof(real));
hipMalloc(&Y_relu.data, relu_size * sizeof(real));
hipMalloc(&Y_prelu.data, prelu_size * sizeof(real));
printf("memcpy: cpu -> gpu\n");
hipMemcpyAsync(X.data, X_data, X_size * sizeof(real),
hipMemcpyHostToDevice);
}
#else
{
X.data = X_data;
Y_relu.data = relu_data;
Y_prelu.data = prelu_data;
}
#endif
// do forward operation
{
printf("do forward (relu)\n");
relu_forward(&X, &Y_relu, &option);
printf("do forward (prelu)\n");
option.negative_slope = 0.1f;
relu_forward(&X, &Y_prelu, &option);
}
// copy GPU data to main memory
#ifdef GPU
{
const long int relu_size = flatten_size(&Y_relu);
const long int prelu_size = flatten_size(&Y_prelu);
printf("memcpy: cpu <- gpu (relu)\n");
hipMemcpyAsync(relu_data, Y_relu.data, relu_size * sizeof(real),
hipMemcpyDeviceToHost);
printf("memcpy: cpu <- gpu (prelu)\n");
hipMemcpyAsync(prelu_data, Y_prelu.data, prelu_size * sizeof(real),
hipMemcpyDeviceToHost);
}
#endif
// verify results
{
const long int relu_size = flatten_size(&Y_relu);
const long int prelu_size = flatten_size(&Y_prelu);
printf("verification (relu)\n");
for (int i = 0; i < relu_size; ++i) {
if (relu_data[i] != X_data[i]
&& (relu_data[i] != 0 || X_data[i] > 0)) {
printf("top[%d] = %.6f, bottom[%d] = %.6f\n",
i, relu_data[i], i, X_data[i]);
}
}
printf("verification (prelu)\n");
for (int i = 0; i < prelu_size; ++i) {
if (prelu_data[i] != X_data[i]
&& (prelu_data[i] != option.negative_slope * X_data[i]
|| X_data[i] > 0)) {
printf("top[%d] = %.6f, bottom[%d] = %.6f\n",
i, prelu_data[i], i, X_data[i]);
}
}
}
// memory deallocation
{
printf("free\n");
free(X_data);
free(relu_data);
free(prelu_data);
}
#ifdef GPU
{
printf("gpu free\n");
hipFree(X.data);
hipFree(Y_relu.data);
hipFree(Y_prelu.data);
}
#endif
return 0;
}
#endif // ifdef TEST
| 9cfaa0c00c9393dc412a02ce7f337a2917aced4e.cu | #include "layer.h"
// --------------------------------------------------------------------------
// kernel code
// relu_{gpu, cpu}
// prelu_{gpu, cpu}
// relu_inplace_{gpu, cpu}
// prelu_inplace_{gpu, cpu}
// --------------------------------------------------------------------------
// ReLU transform bottom -> top
// top[i] = bottom[i] if bottom[i] > 0, 0 otherwise
#ifdef GPU
__global__
void relu_gpu(const real* const bottom, real* const top,
const long int data_size)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
top[index] = (bottom[index] > 0) ? bottom[index] : 0;
}
}
#else
void relu_cpu(const real* const bottom, real* const top,
const long int data_size)
{
for (long int index = 0; index < data_size; ++index) {
top[index] = (bottom[index] > 0) ? bottom[index] : 0;
}
}
#endif
// soft ReLU transform bottom -> top
// top[i] = bottom[i] if bottom[i] > 0, slope * bottom[i] otherwise
#ifdef GPU
__global__
void prelu_gpu(const real* const bottom, real* const top,
const long int data_size, const real negative_slope)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
top[index] = (bottom[index] > 0) ? bottom[index] :
bottom[index] * negative_slope;
}
}
#else
void prelu_cpu(const real* const bottom, real* const top,
const long int data_size, const real negative_slope)
{
for (long int index = 0; index < data_size; ++index) {
top[index] = (bottom[index] > 0) ? bottom[index] :
bottom[index] * negative_slope;
}
}
#endif
// in-place ReLU transform
#ifdef GPU
__global__
void relu_inplace_gpu(real* const bottom, const long int data_size)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
bottom[index] = (bottom[index] > 0) ? bottom[index] : 0;
}
}
#else
void relu_inplace_cpu(real* const bottom, const long int data_size)
{
for (long int index = 0; index < data_size; ++index) {
bottom[index] = (bottom[index] > 0) ? bottom[index] : 0;
}
}
#endif
// in-place soft ReLU transform
#ifdef GPU
__global__
void prelu_inplace_gpu(real* const bottom, const long int data_size,
const real negative_slope)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
bottom[index] = (bottom[index] > 0) ? bottom[index] :
bottom[index] * negative_slope;
}
}
#else
void prelu_inplace_cpu(real* const bottom, const long int data_size,
const real negative_slope)
{
for (long int index = 0; index < data_size; ++index) {
bottom[index] = (bottom[index] > 0) ? bottom[index] :
bottom[index] * negative_slope;
}
}
#endif
// --------------------------------------------------------------------------
// layer operator code
// relu_forward
// relu_forward_inplace
// --------------------------------------------------------------------------
// (soft-)ReLU transform: bottom -> top
// data size: total number of nodes (N * C * H * W or something)
// if option->negative_slope = 0, perform ReLU
// > 0, perform soft ReLU
void relu_forward(const Tensor* const bottom,
Tensor* const top,
const LayerOption* const option)
{
const long int data_size = flatten_size(bottom);
// perform (soft-)ReLU transform
// if option->negative_slope = 0, perform ReLU
// > 0, perform soft ReLU
#ifdef GPU
{
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(data_size, threads_per_block);
if (option->negative_slope == 0) {
relu_gpu<<<num_blocks, threads_per_block>>>(
bottom->data, top->data, data_size);
}
else {
prelu_gpu<<<num_blocks, threads_per_block>>>(
bottom->data, top->data, data_size, option->negative_slope);
}
}
#else
{
if (option->negative_slope == 0) {
relu_cpu(
bottom->data, top->data, data_size);
}
else {
prelu_cpu(
bottom->data, top->data, data_size, option->negative_slope);
}
}
#endif
// set top shape (= bottom shape)
{
top->ndim = bottom->ndim;
top->num_items = bottom->num_items;
for (int n = 0; n < bottom->num_items; ++n) {
for (int i = 0; i < bottom->ndim; ++i) {
top->shape[n][i] = bottom->shape[n][i];
}
}
}
}
// in-place (soft-)ReLU transform: bottom -> bottom
// data size: total number of nodes (N * C * H * W or something)
// if option->negative_slope = 0, perform ReLU
// > 0, perform soft ReLU
void relu_forward_inplace(Tensor* const bottom,
const LayerOption* const option)
{
const long int data_size = flatten_size(bottom);
// perform (soft-)ReLU transform
// if option->negative_slope = 0, perform ReLU
// > 0, perform soft ReLU
#ifdef GPU
{
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(data_size, threads_per_block);
if (option->negative_slope == 0) {
relu_inplace_gpu<<<num_blocks, threads_per_block>>>(
bottom->data, data_size);
}
else {
prelu_inplace_gpu<<<num_blocks, threads_per_block>>>(
bottom->data, data_size, option->negative_slope);
}
}
#else
{
if (option->negative_slope == 0) {
relu_inplace_cpu(
bottom->data, data_size);
}
else {
prelu_inplace_cpu(
bottom->data, data_size, option->negative_slope);
}
}
#endif
}
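// Minimal usage sketch (hypothetical caller; assumes a Tensor pair and a LayerOption
// initialized as in the TEST block below -- not part of the original API surface):
//   LayerOption opt;
//   opt.negative_slope = 0.0f;           // 0 -> plain ReLU
//   relu_forward(&bottom, &top, &opt);   // out-of-place; also copies the shape
//   opt.negative_slope = 0.1f;           // > 0 -> soft ReLU
//   relu_forward_inplace(&top, &opt);    // overwrites its input in place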
// --------------------------------------------------------------------------
// layer shape calculator code
// --------------------------------------------------------------------------
void relu_shape(const Tensor* const bottom,
Tensor* const top)
{
// top shape = bottom shape
top->ndim = bottom->ndim;
top->num_items = bottom->num_items;
for (int n = 0; n < bottom->num_items; ++n) {
for (int i = 0; i < bottom->ndim; ++i) {
top->shape[n][i] = bottom->shape[n][i];
}
}
}
// --------------------------------------------------------------------------
// API code
// --------------------------------------------------------------------------
void forward_relu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
relu_forward(layer->p_bottoms[0], &layer->tops[0], &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void forward_inplace_relu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
relu_forward_inplace(&layer->tops[0], &layer->option);
print_tensor_info(layer->name, &layer->tops[0]);
}
void shape_relu_layer(void* const net_, void* const layer_)
{
Layer* const layer = (Layer*)layer_;
relu_shape(layer->p_bottoms[0], &layer->tops[0]);
}
// --------------------------------------------------------------------------
// test code
// --------------------------------------------------------------------------
#ifdef TEST
int main(int argc, char* argv[])
{
// variable declaration & memory allocation
Tensor X, Y_relu, Y_prelu;
real *X_data = NULL, *relu_data = NULL, *prelu_data = NULL;
LayerOption option;
// set option
{
option.negative_slope = 0;
}
// load data
{
int ndim;
int shape[g_max_ndim];
int total_size;
X_data = load_data("../data/temp/conv_top0.bin",
&ndim, shape, NULL);
X.num_items = shape[0];
X.ndim = ndim - 1;
total_size = 0;
for (int n = 0; n < X.num_items; ++n) {
int size_n = 1;
for (int i = 0; i < X.ndim; ++i) {
X.shape[n][i] = shape[i + 1];
size_n *= shape[i + 1];
}
X.start[n] = total_size;
total_size += size_n;
}
relu_shape(&X, &Y_relu);
relu_shape(&X, &Y_prelu);
relu_data = (real*)malloc(flatten_size(&Y_relu) * sizeof(real));
prelu_data = (real*)malloc(flatten_size(&Y_prelu) * sizeof(real));
}
// CUDA initialization
#ifdef GPU
{
printf("set device\n");
cudaSetDevice(0);
}
#endif
// bind loaded data to corresponding tensors
#ifdef GPU
{
const long int X_size = flatten_size(&X);
const long int relu_size = flatten_size(&Y_relu);
const long int prelu_size = flatten_size(&Y_prelu);
printf("gpu malloc\n");
cudaMalloc(&X.data, X_size * sizeof(real));
cudaMalloc(&Y_relu.data, relu_size * sizeof(real));
cudaMalloc(&Y_prelu.data, prelu_size * sizeof(real));
printf("memcpy: cpu -> gpu\n");
cudaMemcpyAsync(X.data, X_data, X_size * sizeof(real),
cudaMemcpyHostToDevice);
}
#else
{
X.data = X_data;
Y_relu.data = relu_data;
Y_prelu.data = prelu_data;
}
#endif
// do forward operation
{
printf("do forward (relu)\n");
relu_forward(&X, &Y_relu, &option);
printf("do forward (prelu)\n");
option.negative_slope = 0.1f;
relu_forward(&X, &Y_prelu, &option);
}
// copy GPU data to main memory
#ifdef GPU
{
const long int relu_size = flatten_size(&Y_relu);
const long int prelu_size = flatten_size(&Y_prelu);
printf("memcpy: cpu <- gpu (relu)\n");
cudaMemcpyAsync(relu_data, Y_relu.data, relu_size * sizeof(real),
cudaMemcpyDeviceToHost);
printf("memcpy: cpu <- gpu (prelu)\n");
cudaMemcpyAsync(prelu_data, Y_prelu.data, prelu_size * sizeof(real),
cudaMemcpyDeviceToHost);
}
#endif
// verify results
{
const long int relu_size = flatten_size(&Y_relu);
const long int prelu_size = flatten_size(&Y_prelu);
printf("verification (relu)\n");
for (int i = 0; i < relu_size; ++i) {
if (relu_data[i] != X_data[i]
&& (relu_data[i] != 0 || X_data[i] > 0)) {
printf("top[%d] = %.6f, bottom[%d] = %.6f\n",
i, relu_data[i], i, X_data[i]);
}
}
printf("verification (prelu)\n");
for (int i = 0; i < prelu_size; ++i) {
if (prelu_data[i] != X_data[i]
&& (prelu_data[i] != option.negative_slope * X_data[i]
|| X_data[i] > 0)) {
printf("top[%d] = %.6f, bottom[%d] = %.6f\n",
i, prelu_data[i], i, X_data[i]);
}
}
}
// memory deallocation
{
printf("free\n");
free(X_data);
free(relu_data);
free(prelu_data);
}
#ifdef GPU
{
printf("gpu free\n");
cudaFree(X.data);
cudaFree(Y_relu.data);
cudaFree(Y_prelu.data);
}
#endif
return 0;
}
#endif // ifdef TEST
|
6e7e0a872933717a76222bb03f45b86eb41c432e.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _GDD_CU_
#define _GDD_CU_
//#include "gqd_api.h"
#include "gdd_inline.cu"
/** constants in the constant memory */
#define n_dd_inv_fact (15)
static __device__ __constant__ GPU_dd dd_inv_fact[n_dd_inv_fact];
GPU_dd* d_dd_sin_table = NULL;
GPU_dd* d_dd_cos_table = NULL;
/** init function */
void GDDFree() {
if(d_dd_sin_table) {
GPUFREE(d_dd_sin_table);
}
if(d_dd_cos_table) {
GPUFREE(d_dd_cos_table);
}
}
void GDDInit() {
printf("GDD initialization...\n");
//inverse table
GPU_dd h_inv_fact[] = {
make_dd( 1.66666666666666657e-01, 9.25185853854297066e-18),
make_dd( 4.16666666666666644e-02, 2.31296463463574266e-18),
make_dd( 8.33333333333333322e-03, 1.15648231731787138e-19),
make_dd( 1.38888888888888894e-03, -5.30054395437357706e-20),
make_dd( 1.98412698412698413e-04, 1.72095582934207053e-22),
make_dd( 2.48015873015873016e-05, 2.15119478667758816e-23),
make_dd( 2.75573192239858925e-06, -1.85839327404647208e-22),
make_dd( 2.75573192239858883e-07, 2.37677146222502973e-23),
make_dd( 2.50521083854417202e-08, -1.44881407093591197e-24),
make_dd( 2.08767569878681002e-09, -1.20734505911325997e-25),
make_dd( 1.60590438368216133e-10, 1.25852945887520981e-26),
make_dd( 1.14707455977297245e-11, 2.06555127528307454e-28),
make_dd( 7.64716373181981641e-13, 7.03872877733453001e-30),
make_dd( 4.77947733238738525e-14, 4.39920548583408126e-31),
make_dd( 2.81145725434552060e-15, 1.65088427308614326e-31)
};
CUDA_SAFE_CALL( hipMemcpyToSymbol( dd_inv_fact, h_inv_fact, sizeof(GPU_dd)*n_dd_inv_fact ) );
GPU_dd h_sin_table [] = {
make_dd(1.950903220161282758e-01, -7.991079068461731263e-18),
make_dd(3.826834323650897818e-01, -1.005077269646158761e-17),
make_dd(5.555702330196021776e-01, 4.709410940561676821e-17),
make_dd(7.071067811865475727e-01, -4.833646656726456726e-17)
};
GPUMALLOC((void**)&d_dd_sin_table, sizeof(GPU_dd)*4);
TOGPU(d_dd_sin_table, h_sin_table, sizeof(GPU_dd)*4);
GPU_dd h_cos_table [] = {
make_dd(9.807852804032304306e-01, 1.854693999782500573e-17),
make_dd(9.238795325112867385e-01, 1.764504708433667706e-17),
make_dd(8.314696123025452357e-01, 1.407385698472802389e-18),
make_dd(7.071067811865475727e-01, -4.833646656726456726e-17)
};
GPUMALLOC((void**)&d_dd_cos_table, sizeof(GPU_dd)*4);
TOGPU(d_dd_cos_table, h_cos_table, sizeof(GPU_dd)*4);
}
__device__
GPU_dd exp(const GPU_dd &a)
{
const double k = 512.0;
const double inv_k = 1.0 / k;
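// Argument reduction: exp(a) = 2^m * exp(r)^k with m = nint(a / ln 2) and
// r = (a - m*ln 2) / k, k = 512 = 2^9. A short Taylor series evaluates exp(r) - 1
// into s, which is then squared 9 times via s <- 2s + s^2 (i.e. (1+s)^2 - 1).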
if (a.x <= -709.0)
return make_dd(0.0);
//!!!!!!!!!!!!!
if (a.x >= 709.0)
return make_dd(0.0);
//return dd_real::_inf;
if (is_zero(a))
return make_dd(1.0);
if (is_one(a))
return _dd_e;
double m = floor(a.x / _dd_log2.x + 0.5);
GPU_dd r = mul_pwr2(a - _dd_log2 * m, inv_k);
GPU_dd s, t, p;
p = sqr(r);
s = r + mul_pwr2(p, 0.5);
p = p * r;
t = p * dd_inv_fact[0];
int i = 0;
do {
s = s + t;
p = p * r;
t = p * dd_inv_fact[++i];
} while ((fabs(to_double(t)) > inv_k * _dd_eps) && (i < 5));
s = s + t;
for( int i = 0; i < 9; i++ )
{
s = mul_pwr2(s, 2.0) + sqr(s);
}
s = s + 1.0;
//#ifdef NATIVE_DOUBLE
// return ldexp(s, __double2int_rn(m));
//#else
return ldexp(s, int(m));
//#endif
}
/* Computes the square root of the double-double number dd.
NOTE: dd must be a non-negative number. */
__device__
GPU_dd sqrt(const GPU_dd &a)
{
if (is_zero(a))
return make_dd(0.0);
//!!!!!!!!!!!!!!
//TO DO: should make an error
if (is_negative(a)) {
//return _nan;
return make_dd( 0.0 );
}
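// One Newton/Karp-style iteration in double-double: with x ~ 1/sqrt(a) computed in
// double precision, sqrt(a) ~ a*x + (a - (a*x)^2) * (x/2); only the high word of the
// residual is needed to reach full double-double accuracy.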
double x = 1.0 / sqrt(a.x);
double ax = a.x * x;
return dd_add(ax, (a - sqr(ax)).x * (x * 0.5));
//return a - sqr(ax);
}
/* Logarithm. Computes log(x) in double-double precision.
This is a natural logarithm (i.e., base e). */
__device__
GPU_dd log(const GPU_dd &a)
{
if (is_one(a)) {
return make_dd(0.0);
}
//!!!!!!!!!
//TO DO: return an error
if (a.x <= 0.0)
{
//return _nan;
return make_dd( 0.0 );
}
GPU_dd x = make_dd(log(a.x)); // Initial approximation
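// One Newton step on f(x) = exp(x) - a:  x <- x + a*exp(-x) - 1, which roughly
// doubles the number of correct digits of the double-precision seed.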
x = x + a * exp(negative(x)) - 1.0;
return x;
}
/* Computes sin(a) using Taylor series.
Assumes |a| <= pi/32. */
__device__
GPU_dd sin_taylor(const GPU_dd &a) {
const double thresh = 0.5 * fabs(to_double(a)) * _dd_eps;
GPU_dd r, s, t, x;
if (is_zero(a)) {
return make_dd(0.0);
}
int i = 0;
x = negative(sqr(a)); //-sqr(a);
s = a;
r = a;
do {
r = r*x;
t = r * dd_inv_fact[i];
s = s + t;
i += 2;
} while (i < n_dd_inv_fact && fabs(to_double(t)) > thresh);
return s;
}
__device__
GPU_dd cos_taylor(const GPU_dd &a) {
const double thresh = 0.5 * _dd_eps;
GPU_dd r, s, t, x;
if (is_zero(a)) {
return make_dd(1.0);
}
x = negative(sqr(a));
r = x;
s = 1.0 + mul_pwr2(r, 0.5);
int i = 1;
do {
r = r*x;
t = r * dd_inv_fact[i];
s = s + t;
i += 2;
} while (i < n_dd_inv_fact && fabs(to_double(t)) > thresh);
return s;
}
__device__
void sincos_taylor(const GPU_dd &a,
GPU_dd &sin_a, GPU_dd &cos_a) {
if (is_zero(a)) {
sin_a.x = 0.0; sin_a.y = 0.0;
cos_a.x = 1.0; cos_a.y = 0.0;
return;
}
sin_a = sin_taylor(a);
cos_a = sqrt(1.0 - sqr(sin_a));
}
__device__
GPU_dd sin(const GPU_dd &a, const GPU_dd* d_dd_sin_table, const GPU_dd* d_dd_cos_table) {
if (is_zero(a)) {
return make_dd(0.0);
}
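// Reduction: write a = 2*pi*z + (pi/2)*j + (pi/16)*k + t with |t| <= pi/32, then
// combine the Taylor expansions of sin(t)/cos(t) with the tabulated values of
// sin(|k|*pi/16) and cos(|k|*pi/16).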
// approximately reduce modulo 2*pi
GPU_dd z = nint(a / _dd_2pi);
GPU_dd r = a - _dd_2pi * z;
// approximately reduce modulo pi/2 and then modulo pi/16.
GPU_dd t;
double q = floor(r.x / _dd_pi2.x + 0.5);
t = r - _dd_pi2 * q;
int j = (int)(q);
q = floor(t.x / _dd_pi16.x + 0.5);
t = t - _dd_pi16 * q;
int k = (int)(q);
int abs_k = abs(k);
if (j < -2 || j > 2) {
//dd_real::error("(dd_real::sin): Cannot reduce modulo pi/2.");
r.x = r.y = 0.0;
return r;
}
if (abs_k > 4) {
//dd_real::error("(dd_real::sin): Cannot reduce modulo pi/16.");
r.x = r.y = 0.0;
return r;
}
if (k == 0) {
switch (j) {
case 0:
return sin_taylor(t);
case 1:
return cos_taylor(t);
case -1:
return negative(cos_taylor(t));
default:
return negative(sin_taylor(t));
}
}
GPU_dd u = d_dd_cos_table[abs_k-1];
GPU_dd v = d_dd_sin_table[abs_k-1];
GPU_dd sin_t, cos_t;
sincos_taylor(t, sin_t, cos_t);
if (j == 0) {
if (k > 0) {
r = u * sin_t + v * cos_t;
} else {
r = u * sin_t - v * cos_t;
}
} else if (j == 1) {
if (k > 0) {
r = u * cos_t - v * sin_t;
} else {
r = u * cos_t + v * sin_t;
}
} else if (j == -1) {
if (k > 0) {
r = v * sin_t - u * cos_t;
} else if (k < 0) {
//r = -u * cos_t - v * sin_t;
r = negative(u * cos_t) - v * sin_t;
}
} else {
if (k > 0) {
//r = -u * sin_t - v * cos_t;
r = negative(u * sin_t) - v * cos_t;
} else {
r = v * cos_t - u * sin_t;
}
}
return r;
}
__device__
GPU_dd cos(const GPU_dd &a, const GPU_dd* d_dd_sin_table, const GPU_dd* d_dd_cos_table) {
if (is_zero(a)) {
return make_dd(1.0);
}
// approximately reduce modulo 2*pi
GPU_dd z = nint(a / _dd_2pi);
GPU_dd r = a - z * _dd_2pi;
// approximately reduce modulo pi/2 and then modulo pi/16
GPU_dd t;
double q = floor(r.x / _dd_pi2.x + 0.5);
t = r - _dd_pi2 * q;
int j = (int)(q);
q = floor(t.x / _dd_pi16.x + 0.5);
t = t - _dd_pi16 * q;
int k = (int)(q);
int abs_k = abs(k);
if (j < -2 || j > 2) {
//dd_real::error("(dd_real::cos): Cannot reduce modulo pi/2.");
//return dd_real::_nan;
return make_dd(0.0);
}
if (abs_k > 4) {
//dd_real::error("(dd_real::cos): Cannot reduce modulo pi/16.");
//return dd_real::_nan;
return make_dd(0.0);
}
if (k == 0) {
switch (j) {
case 0:
return cos_taylor(t);
case 1:
return negative(sin_taylor(t));
case -1:
return sin_taylor(t);
default:
return negative(cos_taylor(t));
}
}
GPU_dd sin_t, cos_t;
sincos_taylor(t, sin_t, cos_t);
GPU_dd u = d_dd_cos_table[abs_k-1];
GPU_dd v = d_dd_sin_table[abs_k-1];
if (j == 0) {
if (k > 0) {
r = u * cos_t - v * sin_t;
} else {
r = u * cos_t + v * sin_t;
}
} else if (j == 1) {
if (k > 0) {
r = negative(u * sin_t) - v * cos_t;
} else {
r = v * cos_t - u * sin_t;
}
} else if (j == -1) {
if (k > 0) {
r = u * sin_t + v * cos_t;
} else {
r = u * sin_t - v * cos_t;
}
} else {
if (k > 0) {
r = v * sin_t - u * cos_t;
} else {
r = negative(u * cos_t) - v * sin_t;
}
}
return r;
}
#endif
| 6e7e0a872933717a76222bb03f45b86eb41c432e.cu | #ifndef _GDD_CU_
#define _GDD_CU_
//#include "gqd_api.h"
#include "gdd_inline.cu"
/** constants in the constant memory */
#define n_dd_inv_fact (15)
static __device__ __constant__ GPU_dd dd_inv_fact[n_dd_inv_fact];
GPU_dd* d_dd_sin_table = NULL;
GPU_dd* d_dd_cos_table = NULL;
/** init function */
void GDDFree() {
if(d_dd_sin_table) {
GPUFREE(d_dd_sin_table);
}
if(d_dd_cos_table) {
GPUFREE(d_dd_cos_table);
}
}
void GDDInit() {
printf("GDD initialization...\n");
//inverse table
GPU_dd h_inv_fact[] = {
make_dd( 1.66666666666666657e-01, 9.25185853854297066e-18),
make_dd( 4.16666666666666644e-02, 2.31296463463574266e-18),
make_dd( 8.33333333333333322e-03, 1.15648231731787138e-19),
make_dd( 1.38888888888888894e-03, -5.30054395437357706e-20),
make_dd( 1.98412698412698413e-04, 1.72095582934207053e-22),
make_dd( 2.48015873015873016e-05, 2.15119478667758816e-23),
make_dd( 2.75573192239858925e-06, -1.85839327404647208e-22),
make_dd( 2.75573192239858883e-07, 2.37677146222502973e-23),
make_dd( 2.50521083854417202e-08, -1.44881407093591197e-24),
make_dd( 2.08767569878681002e-09, -1.20734505911325997e-25),
make_dd( 1.60590438368216133e-10, 1.25852945887520981e-26),
make_dd( 1.14707455977297245e-11, 2.06555127528307454e-28),
make_dd( 7.64716373181981641e-13, 7.03872877733453001e-30),
make_dd( 4.77947733238738525e-14, 4.39920548583408126e-31),
make_dd( 2.81145725434552060e-15, 1.65088427308614326e-31)
};
CUDA_SAFE_CALL( cudaMemcpyToSymbol( dd_inv_fact, h_inv_fact, sizeof(GPU_dd)*n_dd_inv_fact ) );
GPU_dd h_sin_table [] = {
make_dd(1.950903220161282758e-01, -7.991079068461731263e-18),
make_dd(3.826834323650897818e-01, -1.005077269646158761e-17),
make_dd(5.555702330196021776e-01, 4.709410940561676821e-17),
make_dd(7.071067811865475727e-01, -4.833646656726456726e-17)
};
GPUMALLOC((void**)&d_dd_sin_table, sizeof(GPU_dd)*4);
TOGPU(d_dd_sin_table, h_sin_table, sizeof(GPU_dd)*4);
GPU_dd h_cos_table [] = {
make_dd(9.807852804032304306e-01, 1.854693999782500573e-17),
make_dd(9.238795325112867385e-01, 1.764504708433667706e-17),
make_dd(8.314696123025452357e-01, 1.407385698472802389e-18),
make_dd(7.071067811865475727e-01, -4.833646656726456726e-17)
};
GPUMALLOC((void**)&d_dd_cos_table, sizeof(GPU_dd)*4);
TOGPU(d_dd_cos_table, h_cos_table, sizeof(GPU_dd)*4);
}
__device__
GPU_dd exp(const GPU_dd &a)
{
const double k = 512.0;
const double inv_k = 1.0 / k;
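// Argument reduction: exp(a) = 2^m * exp(r)^k with m = nint(a / ln 2) and
// r = (a - m*ln 2) / k, k = 512 = 2^9. A short Taylor series evaluates exp(r) - 1
// into s, which is then squared 9 times via s <- 2s + s^2 (i.e. (1+s)^2 - 1).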
if (a.x <= -709.0)
return make_dd(0.0);
//!!!!!!!!!!!!!
if (a.x >= 709.0)
return make_dd(0.0);
//return dd_real::_inf;
if (is_zero(a))
return make_dd(1.0);
if (is_one(a))
return _dd_e;
double m = floor(a.x / _dd_log2.x + 0.5);
GPU_dd r = mul_pwr2(a - _dd_log2 * m, inv_k);
GPU_dd s, t, p;
p = sqr(r);
s = r + mul_pwr2(p, 0.5);
p = p * r;
t = p * dd_inv_fact[0];
int i = 0;
do {
s = s + t;
p = p * r;
t = p * dd_inv_fact[++i];
} while ((fabs(to_double(t)) > inv_k * _dd_eps) && (i < 5));
s = s + t;
for( int i = 0; i < 9; i++ )
{
s = mul_pwr2(s, 2.0) + sqr(s);
}
s = s + 1.0;
//#ifdef NATIVE_DOUBLE
// return ldexp(s, __double2int_rn(m));
//#else
return ldexp(s, int(m));
//#endif
}
/* Computes the square root of the double-double number dd.
NOTE: dd must be a non-negative number. */
__device__
GPU_dd sqrt(const GPU_dd &a)
{
if (is_zero(a))
return make_dd(0.0);
//!!!!!!!!!!!!!!
//TO DO: should make an error
if (is_negative(a)) {
//return _nan;
return make_dd( 0.0 );
}
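// One Newton/Karp-style iteration in double-double: with x ~ 1/sqrt(a) computed in
// double precision, sqrt(a) ~ a*x + (a - (a*x)^2) * (x/2); only the high word of the
// residual is needed to reach full double-double accuracy.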
double x = 1.0 / sqrt(a.x);
double ax = a.x * x;
return dd_add(ax, (a - sqr(ax)).x * (x * 0.5));
//return a - sqr(ax);
}
/* Logarithm. Computes log(x) in double-double precision.
This is a natural logarithm (i.e., base e). */
__device__
GPU_dd log(const GPU_dd &a)
{
if (is_one(a)) {
return make_dd(0.0);
}
//!!!!!!!!!
//TO DO: return an error
if (a.x <= 0.0)
{
//return _nan;
return make_dd( 0.0 );
}
GPU_dd x = make_dd(log(a.x)); // Initial approximation
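// One Newton step on f(x) = exp(x) - a:  x <- x + a*exp(-x) - 1, which roughly
// doubles the number of correct digits of the double-precision seed.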
x = x + a * exp(negative(x)) - 1.0;
return x;
}
/* Computes sin(a) using Taylor series.
Assumes |a| <= pi/32. */
__device__
GPU_dd sin_taylor(const GPU_dd &a) {
const double thresh = 0.5 * fabs(to_double(a)) * _dd_eps;
GPU_dd r, s, t, x;
if (is_zero(a)) {
return make_dd(0.0);
}
int i = 0;
x = negative(sqr(a)); //-sqr(a);
s = a;
r = a;
do {
r = r*x;
t = r * dd_inv_fact[i];
s = s + t;
i += 2;
} while (i < n_dd_inv_fact && fabs(to_double(t)) > thresh);
return s;
}
__device__
GPU_dd cos_taylor(const GPU_dd &a) {
const double thresh = 0.5 * _dd_eps;
GPU_dd r, s, t, x;
if (is_zero(a)) {
return make_dd(1.0);
}
x = negative(sqr(a));
r = x;
s = 1.0 + mul_pwr2(r, 0.5);
int i = 1;
do {
r = r*x;
t = r * dd_inv_fact[i];
s = s + t;
i += 2;
} while (i < n_dd_inv_fact && fabs(to_double(t)) > thresh);
return s;
}
__device__
void sincos_taylor(const GPU_dd &a,
GPU_dd &sin_a, GPU_dd &cos_a) {
if (is_zero(a)) {
sin_a.x = 0.0; sin_a.y = 0.0;
cos_a.x = 1.0; cos_a.y = 0.0;
return;
}
sin_a = sin_taylor(a);
cos_a = sqrt(1.0 - sqr(sin_a));
}
__device__
GPU_dd sin(const GPU_dd &a, const GPU_dd* d_dd_sin_table, const GPU_dd* d_dd_cos_table) {
if (is_zero(a)) {
return make_dd(0.0);
}
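// Reduction: write a = 2*pi*z + (pi/2)*j + (pi/16)*k + t with |t| <= pi/32, then
// combine the Taylor expansions of sin(t)/cos(t) with the tabulated values of
// sin(|k|*pi/16) and cos(|k|*pi/16).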
// approximately reduce modulo 2*pi
GPU_dd z = nint(a / _dd_2pi);
GPU_dd r = a - _dd_2pi * z;
// approximately reduce modulo pi/2 and then modulo pi/16.
GPU_dd t;
double q = floor(r.x / _dd_pi2.x + 0.5);
t = r - _dd_pi2 * q;
int j = (int)(q);
q = floor(t.x / _dd_pi16.x + 0.5);
t = t - _dd_pi16 * q;
int k = (int)(q);
int abs_k = abs(k);
if (j < -2 || j > 2) {
//dd_real::error("(dd_real::sin): Cannot reduce modulo pi/2.");
r.x = r.y = 0.0;
return r;
}
if (abs_k > 4) {
//dd_real::error("(dd_real::sin): Cannot reduce modulo pi/16.");
r.x = r.y = 0.0;
return r;
}
if (k == 0) {
switch (j) {
case 0:
return sin_taylor(t);
case 1:
return cos_taylor(t);
case -1:
return negative(cos_taylor(t));
default:
return negative(sin_taylor(t));
}
}
GPU_dd u = d_dd_cos_table[abs_k-1];
GPU_dd v = d_dd_sin_table[abs_k-1];
GPU_dd sin_t, cos_t;
sincos_taylor(t, sin_t, cos_t);
if (j == 0) {
if (k > 0) {
r = u * sin_t + v * cos_t;
} else {
r = u * sin_t - v * cos_t;
}
} else if (j == 1) {
if (k > 0) {
r = u * cos_t - v * sin_t;
} else {
r = u * cos_t + v * sin_t;
}
} else if (j == -1) {
if (k > 0) {
r = v * sin_t - u * cos_t;
} else if (k < 0) {
//r = -u * cos_t - v * sin_t;
r = negative(u * cos_t) - v * sin_t;
}
} else {
if (k > 0) {
//r = -u * sin_t - v * cos_t;
r = negative(u * sin_t) - v * cos_t;
} else {
r = v * cos_t - u * sin_t;
}
}
return r;
}
__device__
GPU_dd cos(const GPU_dd &a, const GPU_dd* d_dd_sin_table, const GPU_dd* d_dd_cos_table) {
if (is_zero(a)) {
return make_dd(1.0);
}
// approximately reduce modulo 2*pi
GPU_dd z = nint(a / _dd_2pi);
GPU_dd r = a - z * _dd_2pi;
// approximately reduce modulo pi/2 and then modulo pi/16
GPU_dd t;
double q = floor(r.x / _dd_pi2.x + 0.5);
t = r - _dd_pi2 * q;
int j = (int)(q);
q = floor(t.x / _dd_pi16.x + 0.5);
t = t - _dd_pi16 * q;
int k = (int)(q);
int abs_k = abs(k);
if (j < -2 || j > 2) {
//dd_real::error("(dd_real::cos): Cannot reduce modulo pi/2.");
//return dd_real::_nan;
return make_dd(0.0);
}
if (abs_k > 4) {
//dd_real::error("(dd_real::cos): Cannot reduce modulo pi/16.");
//return dd_real::_nan;
return make_dd(0.0);
}
if (k == 0) {
switch (j) {
case 0:
return cos_taylor(t);
case 1:
return negative(sin_taylor(t));
case -1:
return sin_taylor(t);
default:
return negative(cos_taylor(t));
}
}
GPU_dd sin_t, cos_t;
sincos_taylor(t, sin_t, cos_t);
GPU_dd u = d_dd_cos_table[abs_k-1];
GPU_dd v = d_dd_sin_table[abs_k-1];
if (j == 0) {
if (k > 0) {
r = u * cos_t - v * sin_t;
} else {
r = u * cos_t + v * sin_t;
}
} else if (j == 1) {
if (k > 0) {
r = negative(u * sin_t) - v * cos_t;
} else {
r = v * cos_t - u * sin_t;
}
} else if (j == -1) {
if (k > 0) {
r = u * sin_t + v * cos_t;
} else {
r = u * sin_t - v * cos_t;
}
} else {
if (k > 0) {
r = v * sin_t - u * cos_t;
} else {
r = negative(u * cos_t) - v * sin_t;
}
}
return r;
}
#endif
|
465496b78910d9cd505ca1dea18c1c9778640c5c.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}[double/complex double]
* Sparse/Dense --> Dense, Z=CuMatlab_full(Sparse/Dense(X)).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include "CuMatlab_fullD.cu"
#include "CuMatlab_fullZ.cu"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
extern "C" static void mexCuMatlab_fullD(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_fullZ(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
hipError_t errCode = hipGetDeviceCount(&nDevices);
//int nDevices;
//hipGetDeviceCount(&nDevices);
if (errCode != hipSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs==1 && nlhs==1) {
if (mxIsGPUArray(prhs[0])) {
mxGPUArray const *tempGPU;
tempGPU = mxGPUCreateFromMxArray(prhs[0]);
if (mxGPUGetClassID(tempGPU) == mxDOUBLE_CLASS && mxGPUGetComplexity(tempGPU) == mxREAL){
mexCuMatlab_fullD(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU);
return;
}
else if (mxGPUGetClassID(tempGPU) == mxDOUBLE_CLASS && mxGPUGetComplexity(tempGPU) == mxCOMPLEX){
mexCuMatlab_fullZ(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU);
return;
}
else{
mxGPUDestroyGPUArray(tempGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else if(!mxIsGPUArray(prhs[0])) {
if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS && (!mxIsComplex(prhs[0]))){
mexCuMatlab_fullD(nlhs, plhs,
nrhs, prhs);
return;
}
else if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS && (mxIsComplex(prhs[0]))){
mexCuMatlab_fullZ(nlhs, plhs,
nrhs, prhs);
return;
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
}
else if ((nrhs<1) || (nrhs>1) || (nlhs<1) || (nlhs>1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input/output arguments! input argument must be one and output argument must be one\n");
return;
}
}
| 465496b78910d9cd505ca1dea18c1c9778640c5c.cu |
/*
* This CUDA-Cusparse code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}[double/complex double]
* Sparse/Dense --> Dense, Z=CuMatlab_full(Sparse/Dense(X)).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cusparse_v2.h>
#include "CuMatlab_fullD.cu"
#include "CuMatlab_fullZ.cu"
#include <cuda.h>
#include <cuda_runtime.h>
extern "C" static void mexCuMatlab_fullD(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
extern "C" static void mexCuMatlab_fullZ(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[]);
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
int nDevices;
cudaError_t errCode = cudaGetDeviceCount(&nDevices);
//int nDevices;
//cudaGetDeviceCount(&nDevices);
if (errCode != cudaSuccess){
printf("Error! No CUDA devices found! \n");
return;
}
if (nrhs==1 && nlhs==1) {
if (mxIsGPUArray(prhs[0])) {
mxGPUArray const *tempGPU;
tempGPU = mxGPUCreateFromMxArray(prhs[0]);
if (mxGPUGetClassID(tempGPU) == mxDOUBLE_CLASS && mxGPUGetComplexity(tempGPU) == mxREAL){
mexCuMatlab_fullD(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU);
return;
}
else if (mxGPUGetClassID(tempGPU) == mxDOUBLE_CLASS && mxGPUGetComplexity(tempGPU) == mxCOMPLEX){
mexCuMatlab_fullZ(nlhs, plhs,
nrhs, prhs);
mxGPUDestroyGPUArray(tempGPU);
return;
}
else{
mxGPUDestroyGPUArray(tempGPU);
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
//
else if(!mxIsGPUArray(prhs[0])) {
if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS && (!mxIsComplex(prhs[0]))){
mexCuMatlab_fullD(nlhs, plhs,
nrhs, prhs);
return;
}
else if (mxGetClassID(prhs[0]) == mxDOUBLE_CLASS && (mxIsComplex(prhs[0]))){
mexCuMatlab_fullZ(nlhs, plhs,
nrhs, prhs);
return;
}
else{
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input arguments! %s\n");
}
}
}
else if ((nrhs<1) || (nrhs>1) || (nlhs<1) || (nlhs>1) ) {
mexErrMsgIdAndTxt( "MATLAB:mexatexit:invalidInput",
"Incorrect input/output arguments! input argument must be one and output argument must be one\n");
return;
}
}
|
4e614542ced092e3017568cd9f20b883945dcb17.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by cheesema on 05.04.18.
//
#include "APRDownsampleGPU.hpp"
template<typename inputType, typename outputType>
__global__ void _fill_tree_mean_max(const uint64_t* __restrict__ level_xz_vec,
const uint64_t* __restrict__ xz_end_vec,
const uint16_t* __restrict__ y_vec,
const inputType* __restrict__ input_particles,
const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const uint16_t* __restrict__ y_vec_tree,
outputType* __restrict__ particle_data_output,
const int z_num,
const int x_num,
const int y_num,
const int z_num_parent,
const int x_num_parent,
const int y_num_parent,
const int level,
const int* __restrict__ offset_ind) {
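// Each block (4 warps of 32 threads, judging by the indexing below) handles one parent
// (x_p, z_p) column: every warp streams one of the four (x, z) child columns in y-chunks
// of 32 particles and reduces the 2x2x2 child neighbourhoods into parent_cache before
// the parent mean is written out.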
const int index = offset_ind[blockIdx.x];
const int z_p = index/x_num_parent;
const int x_p = index - z_p*x_num_parent;
const int x_index = (2 * x_p + threadIdx.x/64);
const int z_index = (2 * z_p + (threadIdx.x/32)%2);
const int block = threadIdx.x/32;
const int local_th = (threadIdx.x%32);
__shared__ size_t global_index_begin_0_s[4];
__shared__ size_t global_index_end_0_s[4];
__shared__ float parent_cache[8][16];
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
if( (x_index >= x_num) || (z_index >= z_num) ){
return; //out of bounds
}
if((local_th==0) ) {
size_t xz_start_s = x_index + z_index * x_num + level_xz_vec[level];
global_index_begin_0_s[block] = xz_end_vec[xz_start_s - 1];
global_index_end_0_s[block] = xz_end_vec[xz_start_s];
}
__syncthreads();
if(global_index_begin_0_s[0] == global_index_end_0_s[0]){
return;
}
const size_t global_index_begin_0 = global_index_begin_0_s[block];
const size_t global_index_end_0 = global_index_end_0_s[block];
float current_val = 0;
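// Boundary scaling: when a parent dimension is not exactly half the child dimension
// (odd child size), the last parent along that axis covers only half as many children,
// so its mean is rescaled by 2 per truncated axis (x/z here, y via scale_factor_yxz).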
float scale_factor_xz = (((2*x_num_parent != x_num) && x_p==(x_num_parent-1) ) + ((2*z_num_parent != z_num) && z_p==(z_num_parent-1) ))*2;
if(scale_factor_xz == 0){
scale_factor_xz = 1;
}
float scale_factor_yxz = scale_factor_xz;
if((2*y_num_parent != y_num)){
scale_factor_yxz = scale_factor_xz*2;
}
size_t xz_start = x_p + z_p*x_num_parent + level_xz_vec_tree[level-1];
const size_t global_index_begin_p = xz_end_vec_tree[xz_start - 1];
const size_t global_index_end_p = xz_end_vec_tree[xz_start];
int current_y, current_y_p;
__syncwarp();
if ((global_index_begin_0 + local_th) < global_index_end_0) {
current_val = input_particles[global_index_begin_0 + local_th];
current_y = y_vec[global_index_begin_0 + local_th];
} else {
current_y = INT32_MAX;
}
__syncwarp();
if (block == 0) {
if (( global_index_begin_p + local_th) < global_index_end_p) {
current_y_p = y_vec_tree[global_index_begin_p + local_th];
} else{
current_y_p = INT32_MAX;
}
}
__syncwarp();
const int block_start = y_vec[global_index_begin_0_s[0]] / 32;
const int block_end = (y_vec[global_index_end_0_s[0] - 1] + 31) / 32;
int sparse_block = 0;
int sparse_block_p = 0;
for (int y_block = block_start; y_block < block_end; ++y_block) {
__syncthreads();
//y value below the current chunk: advance to the next batch of particles
while(current_y < y_block * 32) {
sparse_block++;
if ((sparse_block * 32 + global_index_begin_0 + local_th) < global_index_end_0) {
current_val = input_particles[sparse_block * 32 + global_index_begin_0 + local_th];
current_y = y_vec[sparse_block * 32 + global_index_begin_0 + local_th];
} else {
current_y = INT32_MAX;
}
}
__syncwarp();
//update the down-sampling cache
if ((current_y < (y_block + 1) * 32) && (current_y >= y_block * 32)) {
parent_cache[2*block+current_y%2][(current_y/2) % 16] = (1.0f/8.0f)*current_val;
}
__syncwarp();
//fetch the parent particle data
if (block == 0) {
while(current_y_p < ((y_block * 32)/2)) {
sparse_block_p++;
if ((sparse_block_p * 32 + global_index_begin_p + local_th) < global_index_end_p) {
current_y_p = y_vec_tree[sparse_block_p * 32 + global_index_begin_p + local_th];
} else {
current_y_p = INT32_MAX;
}
}
}
__syncthreads();
if(block == 0) {
if ( (current_y_p < ((y_block+1) * 32)/2) ) {
if ((sparse_block_p * 32 + global_index_begin_p + local_th) < global_index_end_p) {
if(current_y_p == (y_num_parent-1)) {
particle_data_output[sparse_block_p * 32 + global_index_begin_p + local_th] =
scale_factor_yxz*( parent_cache[0][current_y_p % 16] +
parent_cache[1][current_y_p % 16] +
parent_cache[2][current_y_p % 16] +
parent_cache[3][current_y_p % 16] +
parent_cache[4][current_y_p % 16] +
parent_cache[5][current_y_p % 16] +
parent_cache[6][current_y_p % 16] +
parent_cache[7][current_y_p % 16]);
} else {
particle_data_output[sparse_block_p * 32 + global_index_begin_p + local_th] =
scale_factor_xz*( parent_cache[0][current_y_p % 16] +
parent_cache[1][current_y_p % 16] +
parent_cache[2][current_y_p % 16] +
parent_cache[3][current_y_p % 16] +
parent_cache[4][current_y_p % 16] +
parent_cache[5][current_y_p % 16] +
parent_cache[6][current_y_p % 16] +
parent_cache[7][current_y_p % 16]);
}
}
}
}
__syncthreads();
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
}
}
template<typename inputType, typename outputType>
__global__ void _fill_tree_mean_interior(const uint64_t* __restrict__ level_xz_vec,
const uint64_t* __restrict__ xz_end_vec,
const uint16_t* __restrict__ y_vec,
const inputType* __restrict__ input_particles,
const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const uint16_t* __restrict__ y_vec_tree,
outputType* __restrict__ particle_data_output,
const int z_num,
const int x_num,
const int y_num,
const int z_num_parent,
const int x_num_parent,
const int y_num_parent,
const int level,
const int* __restrict__ offset_ind) {
//
// This step is required for the interior down-sampling
//
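// It averages the APR particles at this level together with the already filled tree
// particles at the same level, and writes the result into the parent tree cells at
// level - 1.
//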
const int index = offset_ind[blockIdx.x];
const int z_p = index/x_num_parent;
const int x_p = index - z_p*x_num_parent;
//Local identifiers.
int x_index = (2 * x_p + threadIdx.x/64);
int z_index = (2 * z_p + ((threadIdx.x)/32)%2);
const int block = threadIdx.x/32;
const int local_th = (threadIdx.x%32);
//Particles
__shared__ std::size_t global_index_begin_0[4];
__shared__ std::size_t global_index_end_0[4];
//Parent Tree Particle Cells
__shared__ std::size_t global_index_begin_p[4];
__shared__ std::size_t global_index_end_p[4];
//Tree Particle Cells
__shared__ std::size_t global_index_begin_t[4];
__shared__ std::size_t global_index_end_t[4];
//shared memory caches
__shared__ float parent_cache[8][16]; //16 needed padded with 17 entries to optimize for bank conflicts.
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
__syncwarp();
if((x_index >= x_num) || (z_index >= z_num) ){
return;
}
if(local_th == 0) {
size_t xz_start = x_index + z_index * x_num + level_xz_vec_tree[level];
global_index_begin_t[block] = xz_end_vec_tree[xz_start - 1];
global_index_end_t[block] = xz_end_vec_tree[xz_start];
}
__syncwarp();
if(local_th == 1) {
size_t xz_start = x_index + z_index * x_num + level_xz_vec[level];
global_index_begin_0[block] = xz_end_vec[xz_start - 1];
global_index_end_0[block] = xz_end_vec[xz_start];
}
__syncwarp();
if(local_th == 2) {
size_t xz_start = x_p + z_p * x_num_parent + level_xz_vec_tree[level - 1];
global_index_begin_p[block] = xz_end_vec_tree[xz_start - 1];
global_index_end_p[block] = xz_end_vec_tree[xz_start];
}
__syncthreads();
if((global_index_begin_0[block] == global_index_end_0[block]) && (global_index_begin_t[block] == global_index_end_t[block])){
return;
}
float scale_factor_xz = (((2*x_num_parent != x_num) && x_p==(x_num_parent-1) ) + ((2*z_num_parent != z_num) && z_p==(z_num_parent-1) ))*2;
if(scale_factor_xz == 0){
scale_factor_xz = 1;
}
float scale_factor_yxz = scale_factor_xz;
if((2*y_num_parent != y_num)){
scale_factor_yxz = scale_factor_xz*2;
}
int y_0, y_p, y_t;
float f_0, f_t;
__syncwarp();
if ((global_index_begin_0[block] + local_th) < global_index_end_0[block]) {
y_0 = y_vec[global_index_begin_0[block] + local_th];
f_0 = input_particles[global_index_begin_0[block] + local_th];
} else {
y_0 = INT32_MAX;
}
__syncwarp();
if ((global_index_begin_t[block] + local_th) < global_index_end_t[block]) {
y_t = y_vec_tree[global_index_begin_t[block] + local_th];
f_t = particle_data_output[global_index_begin_t[block] + local_th];
} else {
y_t = INT32_MAX;
}
__syncwarp();
if (block == 0) {
if (( global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
y_p = y_vec_tree[global_index_begin_p[block] + local_th];
} else {
y_p = INT32_MAX;
}
}
__syncwarp();
const int block_start = y_vec_tree[global_index_begin_p[0]] / 16;
const int block_end = ((2 * y_vec_tree[max(global_index_end_p[0], (size_t)1) - 1] + 32) / 32); // "ceil( (2 * y_tree + 1) / 32 )"
int sparse_block = 0;
int sparse_block_p = 0;
int sparse_block_t = 0;
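// Added note: the sparse_block counters slide each warp's 32-particle window
// along its row; a thread's current element is always at
// sparse_block * 32 + global_index_begin + local_th.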
for (int y_block = block_start; y_block < block_end; ++y_block) {
__syncthreads();
// update apr particle
while(y_0 < (y_block * 32)) {
sparse_block++;
if ((sparse_block * 32 + global_index_begin_0[block] + local_th) < global_index_end_0[block]) {
f_0 = input_particles[sparse_block * 32 + global_index_begin_0[block] + local_th];
y_0 = y_vec[sparse_block * 32 + global_index_begin_0[block] + local_th];
} else{
y_0 = INT32_MAX;
}
}
__syncthreads();
// update tree particle
while(y_t < (y_block * 32)) {
sparse_block_t++;
if ((sparse_block_t * 32 + global_index_begin_t[block] + local_th) < global_index_end_t[block]) {
f_t = particle_data_output[sparse_block_t * 32 + global_index_begin_t[block] + local_th];
y_t = y_vec_tree[sparse_block_t * 32 + global_index_begin_t[block] + local_th];
} else{
y_t = INT32_MAX;
}
}
__syncwarp();
///update the down-sampling cache
//insert apr particles
if (y_0 < (y_block + 1) * 32) {
parent_cache[2*block + y_0 % 2][(y_0 / 2) % 16] = (1.0f / 8.0f) * f_0;
}
__syncwarp();
//insert tree particles
if (y_t < (y_block + 1) * 32) {
parent_cache[2*block + y_t % 2][(y_t / 2) % 16] = (1.0f / 8.0f) * f_t;
}
__syncwarp();
// update parent particle
if (block == 0) {
while(y_p < ((y_block * 32) / 2)) {
sparse_block_p++;
if ((sparse_block_p * 32 + global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
y_p = y_vec_tree[sparse_block_p * 32 + global_index_begin_p[block] + local_th];
} else{
y_p = INT32_MAX;
}
}
}
__syncthreads();
// perform the reduction and write result to output array
if(block == 0) {
if (y_p < ((y_block + 1) * 32) / 2) { //y_p >= (y_block * 32)/2 is guaranteed by the update step
if ((sparse_block_p * 32 + global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
if (y_p == (y_num_parent - 1)) {
particle_data_output[sparse_block_p * 32 + global_index_begin_p[block] + local_th] =
scale_factor_yxz * (parent_cache[0][y_p % 16] +
parent_cache[1][y_p % 16] +
parent_cache[2][y_p % 16] +
parent_cache[3][y_p % 16] +
parent_cache[4][y_p % 16] +
parent_cache[5][y_p % 16] +
parent_cache[6][y_p % 16] +
parent_cache[7][y_p % 16]);
} else {
particle_data_output[sparse_block_p * 32 + global_index_begin_p[block] + local_th] =
scale_factor_xz * (parent_cache[0][y_p % 16] +
parent_cache[1][y_p % 16] +
parent_cache[2][y_p % 16] +
parent_cache[3][y_p % 16] +
parent_cache[4][y_p % 16] +
parent_cache[5][y_p % 16] +
parent_cache[6][y_p % 16] +
parent_cache[7][y_p % 16]);
}
}
}
}
__syncthreads();
// reset the cache
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
}
}
template<typename inputType, typename outputType>
__global__ void _fill_tree_mean_max_alt(const uint64_t* __restrict__ level_xz_vec,
const uint64_t* __restrict__ xz_end_vec,
const uint16_t* __restrict__ y_vec,
const inputType* __restrict__ input_particles,
const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const uint16_t* __restrict__ y_vec_tree,
outputType* __restrict__ particle_data_output,
const int z_num,
const int x_num,
const int y_num,
const int z_num_parent,
const int x_num_parent,
const int y_num_parent,
const int level) {
const int z_index = blockIdx.z * blockDim.z + threadIdx.z;
const int x_index = blockIdx.x * blockDim.y + threadIdx.y;
const int block = threadIdx.z * 2 + threadIdx.y;
const int local_th = threadIdx.x;
__shared__ float parent_cache[8][16];
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
if( (x_index >= x_num) || (z_index >= z_num) ){
return; //out of bounds
}
__shared__ size_t global_index_begin_0_s[4];
__shared__ size_t global_index_end_0_s[4];
if((local_th==0) ) {
size_t xz_start_s = x_index + z_index * x_num + level_xz_vec[level];
global_index_begin_0_s[block] = xz_end_vec[xz_start_s - 1];
global_index_end_0_s[block] = xz_end_vec[xz_start_s];
}
__syncthreads();
if(global_index_begin_0_s[0] == global_index_end_0_s[0]){
return;
}
const size_t global_index_begin_0 = global_index_begin_0_s[block];
const size_t global_index_end_0 = global_index_end_0_s[block];
float scale_factor_xz = (((2*x_num_parent != x_num) && (x_index / 2)==(x_num_parent-1) ) + ((2*z_num_parent != z_num) && (z_index / 2)==(z_num_parent-1) ))*2;
if(scale_factor_xz == 0){
scale_factor_xz = 1;
}
float scale_factor_yxz = scale_factor_xz;
if((2*y_num_parent != y_num)){
scale_factor_yxz = scale_factor_xz*2;
}
size_t xz_start = (x_index / 2) + (z_index / 2)*x_num_parent + level_xz_vec_tree[level-1];
const size_t global_index_begin_p = xz_end_vec_tree[xz_start - 1];
const size_t global_index_end_p = xz_end_vec_tree[xz_start];
int current_y;
int current_y_p;
float current_val = 0;
//initialize (i=0)
if ((global_index_begin_0 + local_th) < global_index_end_0) {
current_val = input_particles[global_index_begin_0 + local_th];
current_y = y_vec[global_index_begin_0 + local_th];
} else {
current_y = INT32_MAX;
}
if (block == 0) {
if (( global_index_begin_p + local_th) < global_index_end_p) {
current_y_p = y_vec_tree[global_index_begin_p + local_th];
} else{
current_y_p = INT32_MAX;
}
}
const int block_start = y_vec[global_index_begin_0_s[0]] / 32;
const int block_end = (y_vec[global_index_end_0_s[0]-1] + 31) / 32;
int sparse_block = 0;
int sparse_block_p = 0;
for (int y_block = block_start; y_block < block_end; ++y_block) {
__syncthreads();
//value less than current chunk, then update.
while(current_y < y_block * 32) {
sparse_block++;
if ((sparse_block * 32 + global_index_begin_0 + local_th) < global_index_end_0) {
current_val = input_particles[sparse_block * 32 + global_index_begin_0 + local_th];
current_y = y_vec[sparse_block * 32 + global_index_begin_0 + local_th];
} else{
current_y = INT32_MAX;
}
}
__syncthreads();
//update the down-sampling caches
if ((current_y < (y_block + 1) * 32) && (current_y >= (y_block) * 32)) {
parent_cache[2*block+current_y%2][(current_y/2) % 16] = (1.0f/8.0f)*current_val;
}
__syncthreads();
//fetch the parent particle data
if (block == 0) {
while(current_y_p < ((y_block * 32)/2)) {
sparse_block_p++;
if ((sparse_block_p * 32 + global_index_begin_p + local_th) < global_index_end_p) {
current_y_p = y_vec_tree[sparse_block_p * 32 + global_index_begin_p + local_th];
} else {
current_y_p = INT32_MAX;
}
}
}
__syncthreads();
if(block == 0) {
if (current_y_p < ((y_block+1) * 32)/2) {
if ((sparse_block_p * 32 + global_index_begin_p + local_th) < global_index_end_p) {
if(current_y_p == (y_num_parent-1)) {
particle_data_output[sparse_block_p * 32 + global_index_begin_p + local_th] =
scale_factor_yxz*( parent_cache[0][current_y_p % 16] +
parent_cache[1][current_y_p % 16] +
parent_cache[2][current_y_p % 16] +
parent_cache[3][current_y_p % 16] +
parent_cache[4][current_y_p % 16] +
parent_cache[5][current_y_p % 16] +
parent_cache[6][current_y_p % 16] +
parent_cache[7][current_y_p % 16]);
} else {
particle_data_output[sparse_block_p * 32 + global_index_begin_p + local_th] =
scale_factor_xz*( parent_cache[0][current_y_p % 16] +
parent_cache[1][current_y_p % 16] +
parent_cache[2][current_y_p % 16] +
parent_cache[3][current_y_p % 16] +
parent_cache[4][current_y_p % 16] +
parent_cache[5][current_y_p % 16] +
parent_cache[6][current_y_p % 16] +
parent_cache[7][current_y_p % 16]);
}
}
}
}
__syncthreads();
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
}
}
template<typename inputType, typename outputType>
__global__ void _fill_tree_mean_interior_alt(const uint64_t* __restrict__ level_xz_vec,
const uint64_t* __restrict__ xz_end_vec,
const uint16_t* __restrict__ y_vec,
const inputType* __restrict__ input_particles,
const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const uint16_t* __restrict__ y_vec_tree,
outputType* __restrict__ particle_data_output,
const int z_num,
const int x_num,
const int y_num,
const int z_num_parent,
const int x_num_parent,
const int y_num_parent,
const int level) {
//
// This step is required for the interior down-sampling
//
const int z_index = blockIdx.z * blockDim.z + threadIdx.z;
const int x_index = blockIdx.x * blockDim.y + threadIdx.y;
const int block = threadIdx.z * 2 + threadIdx.y;
const int local_th = threadIdx.x;
//shared memory cache
__shared__ float parent_cache[8][16]; //16 entries needed; padding rows to 17 would help avoid shared-memory bank conflicts.
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
if((x_index >= x_num) || (z_index >= z_num) ){
return;
}
//Particles
__shared__ std::size_t global_index_begin_0[4];
__shared__ std::size_t global_index_end_0[4];
//Parent Tree Particle Cells
__shared__ std::size_t global_index_begin_p[4];
__shared__ std::size_t global_index_end_p[4];
//Interior Tree Particle Cells
__shared__ std::size_t global_index_begin_t[4];
__shared__ std::size_t global_index_end_t[4];
if(local_th == 0) {
size_t xz_start = x_index + z_index * x_num + level_xz_vec_tree[level];
global_index_begin_t[block] = xz_end_vec_tree[xz_start - 1];
global_index_end_t[block] = xz_end_vec_tree[xz_start];
}
if(local_th == 1) {
size_t xz_start = x_index + z_index * x_num + level_xz_vec[level];
global_index_begin_0[block] = xz_end_vec[xz_start - 1];
global_index_end_0[block] = xz_end_vec[xz_start];
}
if(local_th == 2) {
size_t xz_start = (x_index / 2) + (z_index / 2) * x_num_parent + level_xz_vec_tree[level - 1];
global_index_begin_p[block] = xz_end_vec_tree[xz_start - 1];
global_index_end_p[block] = xz_end_vec_tree[xz_start];
}
__syncthreads();
if((global_index_begin_0[block] == global_index_end_0[block]) && (global_index_begin_t[block] == global_index_end_t[block])){
return;
}
float scale_factor_xz = (((2*x_num_parent != x_num) && (x_index / 2)==(x_num_parent-1) ) + ((2*z_num_parent != z_num) && (z_index / 2)==(z_num_parent-1) ))*2;
if(scale_factor_xz == 0){
scale_factor_xz = 1;
}
float scale_factor_yxz = scale_factor_xz;
if((2*y_num_parent != y_num)){
scale_factor_yxz = scale_factor_xz*2;
}
int y_0, y_p, y_t;
float f_0, f_t;
__syncthreads();
//each thread grabs a particle
//from the apr
if ((global_index_begin_0[block] + local_th) < global_index_end_0[block]) {
y_0 = y_vec[global_index_begin_0[block] + local_th];
f_0 = input_particles[global_index_begin_0[block] + local_th];
} else {
y_0 = INT32_MAX;
}
__syncthreads();
//from the tree
if ((global_index_begin_t[block] + local_th) < global_index_end_t[block]) {
y_t = y_vec_tree[global_index_begin_t[block] + local_th];
f_t = particle_data_output[global_index_begin_t[block] + local_th];
} else {
y_t = INT32_MAX;
}
__syncthreads();
//parent particle (tree)
if (block == 0) {
if (( global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
y_p = y_vec_tree[global_index_begin_p[block] + local_th];
} else {
y_p = INT32_MAX;
}
}
const int block_start = y_vec_tree[global_index_begin_p[0]] / 16;
const int block_end = ((2 * y_vec_tree[max(global_index_end_p[0], (size_t)1) - 1] + 32) / 32); // "ceil( (2 * y_tree + 1) / 32 )"
int sparse_block = 0;
int sparse_block_p = 0;
int sparse_block_t = 0;
for (int y_block = block_start; y_block < block_end; ++y_block) {
__syncthreads();
//value less than current chunk, then update.
while(y_0 < (y_block * 32)) {
sparse_block++;
if ((sparse_block * 32 + global_index_begin_0[block] + local_th) < global_index_end_0[block]) {
f_0 = input_particles[sparse_block * 32 + global_index_begin_0[block] + local_th];
y_0 = y_vec[sparse_block * 32 + global_index_begin_0[block] + local_th];
} else{
y_0 = INT32_MAX;
}
}
__syncthreads();
//interior tree update
while(y_t < (y_block * 32)) {
sparse_block_t++;
if ((sparse_block_t * 32 + global_index_begin_t[block] + local_th) < global_index_end_t[block]) {
f_t = particle_data_output[sparse_block_t * 32 + global_index_begin_t[block] + local_th];
y_t = y_vec_tree[sparse_block_t * 32 + global_index_begin_t[block] + local_th];
} else{
y_t = INT32_MAX;
}
}
__syncthreads();
//update the down-sampling caches
if( y_0 < (y_block + 1) * 32 ) {
parent_cache[2*block + y_0 % 2][(y_0 / 2) % 16] = (1.0f / 8.0f) * f_0;
}
__syncthreads();
//now the interior tree nodes
if ( y_t < (y_block + 1) * 32 ) {
parent_cache[2*block + y_t % 2][(y_t / 2) % 16] = (1.0f / 8.0f) * f_t;
}
__syncthreads();
if (block == 0) {
while(y_p < ((y_block * 32) / 2)) {
sparse_block_p++;
if ((sparse_block_p * 32 + global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
y_p = y_vec_tree[sparse_block_p * 32 + global_index_begin_p[block] + local_th];
} else {
y_p = INT32_MAX;
}
}
}
__syncthreads();
if(block == 0) {
if ( y_p < ((y_block + 1) * 32) / 2 ) {
if ((sparse_block_p * 32 + global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
if (y_p == (y_num_parent - 1)) {
particle_data_output[sparse_block_p * 32 + global_index_begin_p[block] + local_th] =
scale_factor_yxz * (parent_cache[0][y_p % 16] +
parent_cache[1][y_p % 16] +
parent_cache[2][y_p % 16] +
parent_cache[3][y_p % 16] +
parent_cache[4][y_p % 16] +
parent_cache[5][y_p % 16] +
parent_cache[6][y_p % 16] +
parent_cache[7][y_p % 16]);
} else {
particle_data_output[sparse_block_p * 32 + global_index_begin_p[block] + local_th] =
scale_factor_xz * (parent_cache[0][y_p % 16] +
parent_cache[1][y_p % 16] +
parent_cache[2][y_p % 16] +
parent_cache[3][y_p % 16] +
parent_cache[4][y_p % 16] +
parent_cache[5][y_p % 16] +
parent_cache[6][y_p % 16] +
parent_cache[7][y_p % 16]);
}
}
}
}
__syncthreads();
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
}
}
template<int blockSize_z, int blockSize_x>
__global__ void _count_ne_rows_tree_cuda(const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const int z_num,
const int x_num,
const int level,
int* __restrict__ res) {
__shared__ int local_counts[blockSize_x][blockSize_z];
local_counts[threadIdx.y][threadIdx.x] = 0;
const int z_index = blockIdx.x * blockDim.x + threadIdx.x;
if(z_index >= z_num) { return; } // out of bounds
size_t level_start = level_xz_vec_tree[level];
int x_index = threadIdx.y;
int counter = 0;
// loop over x-dimension in chunks
while( x_index < x_num ) {
size_t xz_start = z_index * x_num + x_index + level_start;
// if row is non-empty
if( xz_end_vec_tree[xz_start - 1] < xz_end_vec_tree[xz_start]) {
counter++;
}
x_index += blockDim.y;
}
__syncthreads();
local_counts[threadIdx.y][threadIdx.x] = counter;
__syncthreads();
// reduce over blockDim.y to get the count for each z_index
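// (added note: this gap-halving reduction assumes blockSize_x -- and
// blockSize_z below -- are powers of two, as in the instantiations at the
// end of this file)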
for(int gap = blockSize_x/2; gap > 0; gap/=2) {
if(threadIdx.y < gap) {
local_counts[threadIdx.y][threadIdx.x] += local_counts[threadIdx.y + gap][threadIdx.x];
}
__syncthreads();
}
// now reduce over blockDim.x to get the block count
for(int gap = blockSize_z/2; gap > 0; gap/=2) {
if(threadIdx.x < gap && threadIdx.y == 0) {
local_counts[0][threadIdx.x] += local_counts[0][threadIdx.x + gap];
}
__syncthreads();
}
if(threadIdx.x == 0 && threadIdx.y == 0) {
res[blockIdx.x] = local_counts[0][0];
}
}
__device__ unsigned int count = 0;
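// Added note: 'count' persists in device global memory across launches. Each
// _fill_ne_rows_tree_cuda launch performs exactly ne_count atomic increments,
// and atomicInc wraps at ne_count-1, so the counter is back at 0 when the
// launch completes and the next level again fills indices [0, ne_count).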
__global__ void _fill_ne_rows_tree_cuda(const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const int z_num,
const int x_num,
const int level,
unsigned int ne_count,
unsigned int offset,
int* __restrict__ ne_rows) {
const int z_index = blockIdx.x * blockDim.x + threadIdx.x;
if (z_index >= z_num) { return; } // out of bounds
size_t level_start = level_xz_vec_tree[level];
int x_index = threadIdx.y;
// loop over x-dimension in chunks
while (x_index < x_num) {
size_t xz_start = z_index * x_num + x_index + level_start;
// if row is non-empty
if( xz_end_vec_tree[xz_start - 1] < xz_end_vec_tree[xz_start]) {
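// atomicInc returns the old value and wraps to 0 after reaching
// ne_count-1, keeping 'index' within [0, ne_count)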
unsigned int index = atomicInc(&count, ne_count-1);
ne_rows[offset + index] = z_index * x_num + x_index;
}
x_index += blockDim.y;
}
}
template<int blockSize_z, int blockSize_x>
void compute_ne_rows_tree_cuda(GPUAccessHelper& tree_access, VectorData<int>& ne_count, ScopedCudaMemHandler<int*, JUST_ALLOC>& ne_rows_gpu) {
ne_count.resize(tree_access.level_max() + 3);
int z_blocks_max = (tree_access.z_num(tree_access.level_max()) + blockSize_z - 1) / blockSize_z;
int num_levels = tree_access.level_max() - tree_access.level_min() + 1;
int block_sums_host[z_blocks_max * num_levels];
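// note: this runtime-sized stack array relies on a compiler VLA extension
// (GCC/Clang); a std::vector<int> would be the portable alternative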
int *block_sums_device;
error_check(hipMalloc(&block_sums_device, z_blocks_max*num_levels*sizeof(int)) )
error_check( hipMemset(block_sums_device, 0, z_blocks_max*num_levels*sizeof(int)) )
// error_check( hipDeviceSynchronize() )
int offset = 0;
for(int level = tree_access.level_min(); level <= tree_access.level_max(); ++level) {
int z_blocks = (tree_access.z_num(level) + blockSize_z - 1) / blockSize_z;
dim3 grid_dim(z_blocks, 1, 1);
dim3 block_dim(blockSize_z, blockSize_x, 1);
_count_ne_rows_tree_cuda<blockSize_z, blockSize_x>
<<< grid_dim, block_dim >>>
(tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.z_num(level),
tree_access.x_num(level),
level,
block_sums_device + offset);
offset += z_blocks_max;
}
error_check(hipDeviceSynchronize())
error_check(hipMemcpy(block_sums_host, block_sums_device, z_blocks_max * num_levels * sizeof(int), hipMemcpyDeviceToHost) )
int counter = 0;
offset = 0;
for(int level = tree_access.level_min(); level <= tree_access.level_max(); ++level) {
ne_count[level+1] = counter;
for(int i = 0; i < z_blocks_max; ++i) {
counter += block_sums_host[offset + i];
}
offset += z_blocks_max;
}
ne_count.back() = counter;
ne_rows_gpu.initialize(NULL, counter);
for(int level = (tree_access.level_min() + 1); level <= (tree_access.level_max() + 1); ++level) {
int ne_sz = ne_count[level+1] - ne_count[level];
if( ne_sz == 0 ) {
continue;
}
int z_blocks = (tree_access.z_num(level - 1) + blockSize_z - 1) / blockSize_z;
dim3 grid_dim(z_blocks, 1, 1);
dim3 block_dim(blockSize_z, blockSize_x, 1);
hipLaunchKernelGGL(( _fill_ne_rows_tree_cuda), dim3(grid_dim), dim3(block_dim) , 0, 0,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
level-1,
ne_sz,
ne_count[level],
ne_rows_gpu.get());
}
error_check(hipFree(block_sums_device) )
}
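/* Illustrative usage sketch (added; assumes 'tree_access' is an initialized
GPUAccessHelper, as used elsewhere in this file):

VectorData<int> ne_counter;
ScopedCudaMemHandler<int*, JUST_ALLOC> ne_rows_gpu;
compute_ne_rows_tree_cuda<16, 32>(tree_access, ne_counter, ne_rows_gpu);
// ne_counter[level] .. ne_counter[level+1] then delimit the parent rows
// (tree level level-1) consumed when downsampling APR level 'level',
// and ne_counter.back() holds the total row count.
*/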
void compute_ne_rows_tree(GPUAccessHelper& tree_access, VectorData<int>& ne_counter, VectorData<int>& ne_rows) {
ne_counter.resize(tree_access.level_max() + 3);
int z = 0;
int x = 0;
uint64_t counter = 0;
for (int level = (tree_access.level_min() + 1); level <= (tree_access.level_max() + 1); ++level) {
auto level_start = tree_access.linearAccess->level_xz_vec[level - 1];
ne_counter[level] = counter;
for (z = 0; z < tree_access.z_num(level - 1); z++) {
for (x = 0; x < tree_access.x_num(level - 1); ++x) {
auto offset = x + z * tree_access.x_num(level - 1);
auto xz_start = level_start + offset;
auto begin_index = tree_access.linearAccess->xz_end_vec[xz_start - 1];
auto end_index = tree_access.linearAccess->xz_end_vec[xz_start];
if (begin_index < end_index) {
counter++;
}
}
}
}
ne_rows.resize(counter);
ne_counter.back() = ne_rows.size();
counter = 0;
for (int level = (tree_access.level_min() + 1); level <= (tree_access.level_max() + 1); ++level) {
auto level_start = tree_access.linearAccess->level_xz_vec[level - 1];
for (z = 0; z < tree_access.z_num(level - 1); z++) {
for (x = 0; x < tree_access.x_num(level - 1); ++x) {
auto offset = x + z * tree_access.x_num(level - 1);
auto xz_start = level_start + offset;
//initialize
auto begin_index = tree_access.linearAccess->xz_end_vec[xz_start - 1];
auto end_index = tree_access.linearAccess->xz_end_vec[xz_start];
if (begin_index < end_index) {
ne_rows[counter] = (x + z * tree_access.x_num(level - 1));
counter++;
}
}
}
}
}
template<typename inputType, typename treeType>
void downsample_avg(GPUAccessHelper& access, GPUAccessHelper& tree_access, inputType* input_gpu, treeType* tree_data_gpu, int* ne_rows,VectorData<int>& ne_offset) {
/// assumes input_gpu, tree_data_gpu and ne_rows are already on the device
for (int level = access.level_max(); level >= access.level_min(); --level) {
size_t ne_sz = ne_offset[level+1] - ne_offset[level];
size_t offset = ne_offset[level];
if( ne_sz == 0 ) {
continue;
}
dim3 threads_l(128, 1, 1);
dim3 blocks_l(ne_sz, 1, 1);
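// one block of 128 = 4 x 32 threads per non-empty parent row: each warp
// handles one child (x, z) offset of the 2x2 parent neighbourhood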
if(level == access.level_max()){
_fill_tree_mean_max <<< blocks_l, threads_l >>>
(access.get_level_xz_vec_ptr(),
access.get_xz_end_vec_ptr(),
access.get_y_vec_ptr(),
input_gpu,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.get_y_vec_ptr(),
tree_data_gpu,
access.z_num(level),
access.x_num(level),
access.y_num(level),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
tree_access.y_num(level-1),
level,
ne_rows + offset);
} else {
_fill_tree_mean_interior <<< blocks_l, threads_l >>>
(access.get_level_xz_vec_ptr(),
access.get_xz_end_vec_ptr(),
access.get_y_vec_ptr(),
input_gpu,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.get_y_vec_ptr(),
tree_data_gpu,
access.z_num(level),
access.x_num(level),
access.y_num(level),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
tree_access.y_num(level-1),
level,
ne_rows + offset);
}
error_check( hipDeviceSynchronize() )
}
}
template<typename inputType, typename treeType>
void downsample_avg_alt(GPUAccessHelper& access, GPUAccessHelper& tree_access, inputType* input_gpu, treeType* tree_data_gpu) {
/// assumes that access structures, input_gpu and tree_data_gpu are already on the device
for (int level = access.level_max(); level >= access.level_min(); --level) {
int x_blocks = (access.x_num(level) + 1) / 2;
int z_blocks = (access.z_num(level) + 1) / 2;
dim3 blocks_l(x_blocks, 1, z_blocks);
dim3 threads_l(32, 2, 2);
if(level == access.level_max()){
_fill_tree_mean_max_alt <<< blocks_l, threads_l >>>
(access.get_level_xz_vec_ptr(),
access.get_xz_end_vec_ptr(),
access.get_y_vec_ptr(),
input_gpu,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.get_y_vec_ptr(),
tree_data_gpu,
access.z_num(level),
access.x_num(level),
access.y_num(level),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
tree_access.y_num(level-1),
level);
} else {
_fill_tree_mean_interior_alt <<< blocks_l, threads_l >>>
(access.get_level_xz_vec_ptr(),
access.get_xz_end_vec_ptr(),
access.get_y_vec_ptr(),
input_gpu,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.get_y_vec_ptr(),
tree_data_gpu,
access.z_num(level),
access.x_num(level),
access.y_num(level),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
tree_access.y_num(level-1),
level);
}
error_check( hipDeviceSynchronize() )
}
}
template<typename inputType, typename treeType>
void downsample_avg_alt(GPUAccessHelper& access, GPUAccessHelper& tree_access, VectorData<inputType>& input, VectorData<treeType>& tree_data) {
if(tree_data.size() != tree_access.total_number_particles()) {
tree_data.resize(tree_access.total_number_particles());
}
/// allocate GPU memory
ScopedCudaMemHandler<inputType*, JUST_ALLOC> input_gpu(input.data(), input.size());
ScopedCudaMemHandler<treeType*, JUST_ALLOC> tree_data_gpu(tree_data.data(), tree_data.size());
input_gpu.copyH2D();
downsample_avg_alt(access, tree_access, input_gpu.get(), tree_data_gpu.get());
tree_data_gpu.copyD2H();
}
template<typename inputType, typename treeType>
void downsample_avg(GPUAccessHelper& access, GPUAccessHelper& tree_access, inputType* input_gpu, treeType* tree_data_gpu) {
VectorData<int> ne_counter;
ScopedCudaMemHandler<int*, JUST_ALLOC> ne_rows_gpu;
compute_ne_rows_tree_cuda<16, 32>(tree_access, ne_counter, ne_rows_gpu);
error_check( hipDeviceSynchronize() )
downsample_avg(access, tree_access, input_gpu, tree_data_gpu, ne_rows_gpu.get(), ne_counter);
}
template<typename inputType, typename treeType>
void downsample_avg(GPUAccessHelper& access, GPUAccessHelper& tree_access, VectorData<inputType>& input, VectorData<treeType>& tree_data) {
if(tree_data.size() != tree_access.total_number_particles()) {
tree_data.resize(tree_access.total_number_particles());
}
/// allocate GPU memory
ScopedCudaMemHandler<inputType*, JUST_ALLOC> input_gpu(input.data(), input.size());
ScopedCudaMemHandler<treeType*, JUST_ALLOC> tree_data_gpu(tree_data.data(), tree_data.size());
VectorData<int> ne_counter;
ScopedCudaMemHandler<int*, JUST_ALLOC> ne_rows_gpu;
compute_ne_rows_tree_cuda<16, 32>(tree_access, ne_counter, ne_rows_gpu);
input_gpu.copyH2D();
downsample_avg(access, tree_access, input_gpu.get(), tree_data_gpu.get(), ne_rows_gpu.get(), ne_counter);
tree_data_gpu.copyD2H();
}
/// instantiate templates
template void downsample_avg(GPUAccessHelper&, GPUAccessHelper&, VectorData<uint16_t>&, VectorData<float>&);
template void downsample_avg(GPUAccessHelper&, GPUAccessHelper&, VectorData<uint16_t>&, VectorData<double>&);
template void downsample_avg(GPUAccessHelper&, GPUAccessHelper&, VectorData<float>&, VectorData<float>&);
template void downsample_avg_alt(GPUAccessHelper&, GPUAccessHelper&, VectorData<uint16_t>&, VectorData<float>&);
template void downsample_avg_alt(GPUAccessHelper&, GPUAccessHelper&, VectorData<uint16_t>&, VectorData<double>&);
template void downsample_avg_alt(GPUAccessHelper&, GPUAccessHelper&, VectorData<float>&, VectorData<float>&);
template void compute_ne_rows_tree_cuda<8, 32>(GPUAccessHelper&, VectorData<int>&, ScopedCudaMemHandler<int*, JUST_ALLOC>&);
template void compute_ne_rows_tree_cuda<16, 32>(GPUAccessHelper&, VectorData<int>&, ScopedCudaMemHandler<int*, JUST_ALLOC>&);
template void compute_ne_rows_tree_cuda<32, 32>(GPUAccessHelper&, VectorData<int>&, ScopedCudaMemHandler<int*, JUST_ALLOC>&);
| 4e614542ced092e3017568cd9f20b883945dcb17.cu | //
// Created by cheesema on 05.04.18.
//
#include "APRDownsampleGPU.hpp"
template<typename inputType, typename outputType>
__global__ void _fill_tree_mean_max(const uint64_t* __restrict__ level_xz_vec,
const uint64_t* __restrict__ xz_end_vec,
const uint16_t* __restrict__ y_vec,
const inputType* __restrict__ input_particles,
const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const uint16_t* __restrict__ y_vec_tree,
outputType* __restrict__ particle_data_output,
const int z_num,
const int x_num,
const int y_num,
const int z_num_parent,
const int x_num_parent,
const int y_num_parent,
const int level,
const int* __restrict__ offset_ind) {
const int index = offset_ind[blockIdx.x];
const int z_p = index/x_num_parent;
const int x_p = index - z_p*x_num_parent;
const int x_index = (2 * x_p + threadIdx.x/64);
const int z_index = (2 * z_p + (threadIdx.x/32)%2);
const int block = threadIdx.x/32;
const int local_th = (threadIdx.x%32);
__shared__ size_t global_index_begin_0_s[4];
__shared__ size_t global_index_end_0_s[4];
__shared__ float parent_cache[8][16];
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
if( (x_index >= x_num) || (z_index >= z_num) ){
return; //out of bounds
}
if((local_th==0) ) {
size_t xz_start_s = x_index + z_index * x_num + level_xz_vec[level];
global_index_begin_0_s[block] = xz_end_vec[xz_start_s - 1];
global_index_end_0_s[block] = xz_end_vec[xz_start_s];
}
__syncthreads();
if(global_index_begin_0_s[0] == global_index_end_0_s[0]){
return;
}
const size_t global_index_begin_0 = global_index_begin_0_s[block];
const size_t global_index_end_0 = global_index_end_0_s[block];
float current_val = 0;
float scale_factor_xz = (((2*x_num_parent != x_num) && x_p==(x_num_parent-1) ) + ((2*z_num_parent != z_num) && z_p==(z_num_parent-1) ))*2;
if(scale_factor_xz == 0){
scale_factor_xz = 1;
}
float scale_factor_yxz = scale_factor_xz;
if((2*y_num_parent != y_num)){
scale_factor_yxz = scale_factor_xz*2;
}
size_t xz_start = x_p + z_p*x_num_parent + level_xz_vec_tree[level-1];
const size_t global_index_begin_p = xz_end_vec_tree[xz_start - 1];
const size_t global_index_end_p = xz_end_vec_tree[xz_start];
int current_y, current_y_p;
__syncwarp();
if ((global_index_begin_0 + local_th) < global_index_end_0) {
current_val = input_particles[global_index_begin_0 + local_th];
current_y = y_vec[global_index_begin_0 + local_th];
} else {
current_y = INT32_MAX;
}
__syncwarp();
if (block == 0) {
if (( global_index_begin_p + local_th) < global_index_end_p) {
current_y_p = y_vec_tree[global_index_begin_p + local_th];
} else{
current_y_p = INT32_MAX;
}
}
__syncwarp();
const int block_start = y_vec[global_index_begin_0_s[0]] / 32;
const int block_end = (y_vec[global_index_end_0_s[0] - 1] + 31) / 32;
int sparse_block = 0;
int sparse_block_p = 0;
for (int y_block = block_start; y_block < block_end; ++y_block) {
__syncthreads();
//value less than current chunk, then update.
while(current_y < y_block * 32) {
sparse_block++;
if ((sparse_block * 32 + global_index_begin_0 + local_th) < global_index_end_0) {
current_val = input_particles[sparse_block * 32 + global_index_begin_0 + local_th];
current_y = y_vec[sparse_block * 32 + global_index_begin_0 + local_th];
} else {
current_y = INT32_MAX;
}
}
__syncwarp();
//update the down-sampling cache
if ((current_y < (y_block + 1) * 32) && (current_y >= y_block * 32)) {
parent_cache[2*block+current_y%2][(current_y/2) % 16] = (1.0f/8.0f)*current_val;
}
__syncwarp();
//fetch the parent particle data
if (block == 0) {
while(current_y_p < ((y_block * 32)/2)) {
sparse_block_p++;
if ((sparse_block_p * 32 + global_index_begin_p + local_th) < global_index_end_p) {
current_y_p = y_vec_tree[sparse_block_p * 32 + global_index_begin_p + local_th];
} else {
current_y_p = INT32_MAX;
}
}
}
__syncthreads();
if(block == 0) {
if ( (current_y_p < ((y_block+1) * 32)/2) ) {
if ((sparse_block_p * 32 + global_index_begin_p + local_th) < global_index_end_p) {
if(current_y_p == (y_num_parent-1)) {
particle_data_output[sparse_block_p * 32 + global_index_begin_p + local_th] =
scale_factor_yxz*( parent_cache[0][current_y_p % 16] +
parent_cache[1][current_y_p % 16] +
parent_cache[2][current_y_p % 16] +
parent_cache[3][current_y_p % 16] +
parent_cache[4][current_y_p % 16] +
parent_cache[5][current_y_p % 16] +
parent_cache[6][current_y_p % 16] +
parent_cache[7][current_y_p % 16]);
} else {
particle_data_output[sparse_block_p * 32 + global_index_begin_p + local_th] =
scale_factor_xz*( parent_cache[0][current_y_p % 16] +
parent_cache[1][current_y_p % 16] +
parent_cache[2][current_y_p % 16] +
parent_cache[3][current_y_p % 16] +
parent_cache[4][current_y_p % 16] +
parent_cache[5][current_y_p % 16] +
parent_cache[6][current_y_p % 16] +
parent_cache[7][current_y_p % 16]);
}
}
}
}
__syncthreads();
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
}
}
template<typename inputType, typename outputType>
__global__ void _fill_tree_mean_interior(const uint64_t* __restrict__ level_xz_vec,
const uint64_t* __restrict__ xz_end_vec,
const uint16_t* __restrict__ y_vec,
const inputType* __restrict__ input_particles,
const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const uint16_t* __restrict__ y_vec_tree,
outputType* __restrict__ particle_data_output,
const int z_num,
const int x_num,
const int y_num,
const int z_num_parent,
const int x_num_parent,
const int y_num_parent,
const int level,
const int* __restrict__ offset_ind) {
//
// This step is required for the interior down-sampling
//
const int index = offset_ind[blockIdx.x];
const int z_p = index/x_num_parent;
const int x_p = index - z_p*x_num_parent;
//Local identifiers.
int x_index = (2 * x_p + threadIdx.x/64);
int z_index = (2 * z_p + ((threadIdx.x)/32)%2);
const int block = threadIdx.x/32;
const int local_th = (threadIdx.x%32);
//Particles
__shared__ std::size_t global_index_begin_0[4];
__shared__ std::size_t global_index_end_0[4];
//Parent Tree Particle Cells
__shared__ std::size_t global_index_begin_p[4];
__shared__ std::size_t global_index_end_p[4];
//Tree Particle Cells
__shared__ std::size_t global_index_begin_t[4];
__shared__ std::size_t global_index_end_t[4];
//shared memory caches
__shared__ float parent_cache[8][16]; //16 entries needed; padding rows to 17 would help avoid shared-memory bank conflicts.
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
__syncwarp();
if((x_index >= x_num) || (z_index >= z_num) ){
return;
}
if(local_th == 0) {
size_t xz_start = x_index + z_index * x_num + level_xz_vec_tree[level];
global_index_begin_t[block] = xz_end_vec_tree[xz_start - 1];
global_index_end_t[block] = xz_end_vec_tree[xz_start];
}
__syncwarp();
if(local_th == 1) {
size_t xz_start = x_index + z_index * x_num + level_xz_vec[level];
global_index_begin_0[block] = xz_end_vec[xz_start - 1];
global_index_end_0[block] = xz_end_vec[xz_start];
}
__syncwarp();
if(local_th == 2) {
size_t xz_start = x_p + z_p * x_num_parent + level_xz_vec_tree[level - 1];
global_index_begin_p[block] = xz_end_vec_tree[xz_start - 1];
global_index_end_p[block] = xz_end_vec_tree[xz_start];
}
__syncthreads();
if((global_index_begin_0[block] == global_index_end_0[block]) && (global_index_begin_t[block] == global_index_end_t[block])){
return;
}
float scale_factor_xz = (((2*x_num_parent != x_num) && x_p==(x_num_parent-1) ) + ((2*z_num_parent != z_num) && z_p==(z_num_parent-1) ))*2;
if(scale_factor_xz == 0){
scale_factor_xz = 1;
}
float scale_factor_yxz = scale_factor_xz;
if((2*y_num_parent != y_num)){
scale_factor_yxz = scale_factor_xz*2;
}
int y_0, y_p, y_t;
float f_0, f_t;
__syncwarp();
if ((global_index_begin_0[block] + local_th) < global_index_end_0[block]) {
y_0 = y_vec[global_index_begin_0[block] + local_th];
f_0 = input_particles[global_index_begin_0[block] + local_th];
} else {
y_0 = INT32_MAX;
}
__syncwarp();
if ((global_index_begin_t[block] + local_th) < global_index_end_t[block]) {
y_t = y_vec_tree[global_index_begin_t[block] + local_th];
f_t = particle_data_output[global_index_begin_t[block] + local_th];
} else {
y_t = INT32_MAX;
}
__syncwarp();
if (block == 0) {
if (( global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
y_p = y_vec_tree[global_index_begin_p[block] + local_th];
} else {
y_p = INT32_MAX;
}
}
__syncwarp();
const int block_start = y_vec_tree[global_index_begin_p[0]] / 16;
const int block_end = ((2 * y_vec_tree[max(global_index_end_p[0], (size_t)1) - 1] + 32) / 32); // "ceil( (2 * y_tree + 1) / 32 )"
int sparse_block = 0;
int sparse_block_p = 0;
int sparse_block_t = 0;
for (int y_block = block_start; y_block < block_end; ++y_block) {
__syncthreads();
// update apr particle
while(y_0 < (y_block * 32)) {
sparse_block++;
if ((sparse_block * 32 + global_index_begin_0[block] + local_th) < global_index_end_0[block]) {
f_0 = input_particles[sparse_block * 32 + global_index_begin_0[block] + local_th];
y_0 = y_vec[sparse_block * 32 + global_index_begin_0[block] + local_th];
} else{
y_0 = INT32_MAX;
}
}
__syncthreads();
// update tree particle
while(y_t < (y_block * 32)) {
sparse_block_t++;
if ((sparse_block_t * 32 + global_index_begin_t[block] + local_th) < global_index_end_t[block]) {
f_t = particle_data_output[sparse_block_t * 32 + global_index_begin_t[block] + local_th];
y_t = y_vec_tree[sparse_block_t * 32 + global_index_begin_t[block] + local_th];
} else{
y_t = INT32_MAX;
}
}
__syncwarp();
///update the down-sampling cache
//insert apr particles
if (y_0 < (y_block + 1) * 32) {
parent_cache[2*block + y_0 % 2][(y_0 / 2) % 16] = (1.0f / 8.0f) * f_0;
}
__syncwarp();
//insert tree particles
if (y_t < (y_block + 1) * 32) {
parent_cache[2*block + y_t % 2][(y_t / 2) % 16] = (1.0f / 8.0f) * f_t;
}
__syncwarp();
// update parent particle
if (block == 0) {
while(y_p < ((y_block * 32) / 2)) {
sparse_block_p++;
if ((sparse_block_p * 32 + global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
y_p = y_vec_tree[sparse_block_p * 32 + global_index_begin_p[block] + local_th];
} else{
y_p = INT32_MAX;
}
}
}
__syncthreads();
// perform the reduction and write result to output array
if(block == 0) {
if (y_p < ((y_block + 1) * 32) / 2) { //y_p >= (y_block * 32)/2 is guaranteed by the update step
if ((sparse_block_p * 32 + global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
if (y_p == (y_num_parent - 1)) {
particle_data_output[sparse_block_p * 32 + global_index_begin_p[block] + local_th] =
scale_factor_yxz * (parent_cache[0][y_p % 16] +
parent_cache[1][y_p % 16] +
parent_cache[2][y_p % 16] +
parent_cache[3][y_p % 16] +
parent_cache[4][y_p % 16] +
parent_cache[5][y_p % 16] +
parent_cache[6][y_p % 16] +
parent_cache[7][y_p % 16]);
} else {
particle_data_output[sparse_block_p * 32 + global_index_begin_p[block] + local_th] =
scale_factor_xz * (parent_cache[0][y_p % 16] +
parent_cache[1][y_p % 16] +
parent_cache[2][y_p % 16] +
parent_cache[3][y_p % 16] +
parent_cache[4][y_p % 16] +
parent_cache[5][y_p % 16] +
parent_cache[6][y_p % 16] +
parent_cache[7][y_p % 16]);
}
}
}
}
__syncthreads();
// reset the cache
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
}
}
template<typename inputType, typename outputType>
__global__ void _fill_tree_mean_max_alt(const uint64_t* __restrict__ level_xz_vec,
const uint64_t* __restrict__ xz_end_vec,
const uint16_t* __restrict__ y_vec,
const inputType* __restrict__ input_particles,
const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const uint16_t* __restrict__ y_vec_tree,
outputType* __restrict__ particle_data_output,
const int z_num,
const int x_num,
const int y_num,
const int z_num_parent,
const int x_num_parent,
const int y_num_parent,
const int level) {
const int z_index = blockIdx.z * blockDim.z + threadIdx.z;
const int x_index = blockIdx.x * blockDim.y + threadIdx.y;
const int block = threadIdx.z * 2 + threadIdx.y;
const int local_th = threadIdx.x;
__shared__ float parent_cache[8][16];
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
if( (x_index >= x_num) || (z_index >= z_num) ){
return; //out of bounds
}
__shared__ size_t global_index_begin_0_s[4];
__shared__ size_t global_index_end_0_s[4];
if((local_th==0) ) {
size_t xz_start_s = x_index + z_index * x_num + level_xz_vec[level];
global_index_begin_0_s[block] = xz_end_vec[xz_start_s - 1];
global_index_end_0_s[block] = xz_end_vec[xz_start_s];
}
__syncthreads();
if(global_index_begin_0_s[0] == global_index_end_0_s[0]){
return;
}
const size_t global_index_begin_0 = global_index_begin_0_s[block];
const size_t global_index_end_0 = global_index_end_0_s[block];
float scale_factor_xz = (((2*x_num_parent != x_num) && (x_index / 2)==(x_num_parent-1) ) + ((2*z_num_parent != z_num) && (z_index / 2)==(z_num_parent-1) ))*2;
if(scale_factor_xz == 0){
scale_factor_xz = 1;
}
float scale_factor_yxz = scale_factor_xz;
if((2*y_num_parent != y_num)){
scale_factor_yxz = scale_factor_xz*2;
}
size_t xz_start = (x_index / 2) + (z_index / 2)*x_num_parent + level_xz_vec_tree[level-1];
const size_t global_index_begin_p = xz_end_vec_tree[xz_start - 1];
const size_t global_index_end_p = xz_end_vec_tree[xz_start];
int current_y;
int current_y_p;
float current_val = 0;
//initialize (i=0)
if ((global_index_begin_0 + local_th) < global_index_end_0) {
current_val = input_particles[global_index_begin_0 + local_th];
current_y = y_vec[global_index_begin_0 + local_th];
} else {
current_y = INT32_MAX;
}
if (block == 0) {
if (( global_index_begin_p + local_th) < global_index_end_p) {
current_y_p = y_vec_tree[global_index_begin_p + local_th];
} else{
current_y_p = INT32_MAX;
}
}
const int block_start = y_vec[global_index_begin_0_s[0]] / 32;
const int block_end = (y_vec[global_index_end_0_s[0]-1] + 31) / 32;
int sparse_block = 0;
int sparse_block_p = 0;
for (int y_block = block_start; y_block < block_end; ++y_block) {
__syncthreads();
//value less than current chunk, then update.
while(current_y < y_block * 32) {
sparse_block++;
if ((sparse_block * 32 + global_index_begin_0 + local_th) < global_index_end_0) {
current_val = input_particles[sparse_block * 32 + global_index_begin_0 + local_th];
current_y = y_vec[sparse_block * 32 + global_index_begin_0 + local_th];
} else{
current_y = INT32_MAX;
}
}
__syncthreads();
//update the down-sampling caches
if ((current_y < (y_block + 1) * 32) && (current_y >= (y_block) * 32)) {
parent_cache[2*block+current_y%2][(current_y/2) % 16] = (1.0f/8.0f)*current_val;
}
__syncthreads();
//fetch the parent particle data
if (block == 0) {
while(current_y_p < ((y_block * 32)/2)) {
sparse_block_p++;
if ((sparse_block_p * 32 + global_index_begin_p + local_th) < global_index_end_p) {
current_y_p = y_vec_tree[sparse_block_p * 32 + global_index_begin_p + local_th];
} else {
current_y_p = INT32_MAX;
}
}
}
__syncthreads();
if(block == 0) {
if (current_y_p < ((y_block+1) * 32)/2) {
if ((sparse_block_p * 32 + global_index_begin_p + local_th) < global_index_end_p) {
if(current_y_p == (y_num_parent-1)) {
particle_data_output[sparse_block_p * 32 + global_index_begin_p + local_th] =
scale_factor_yxz*( parent_cache[0][current_y_p % 16] +
parent_cache[1][current_y_p % 16] +
parent_cache[2][current_y_p % 16] +
parent_cache[3][current_y_p % 16] +
parent_cache[4][current_y_p % 16] +
parent_cache[5][current_y_p % 16] +
parent_cache[6][current_y_p % 16] +
parent_cache[7][current_y_p % 16]);
} else {
particle_data_output[sparse_block_p * 32 + global_index_begin_p + local_th] =
scale_factor_xz*( parent_cache[0][current_y_p % 16] +
parent_cache[1][current_y_p % 16] +
parent_cache[2][current_y_p % 16] +
parent_cache[3][current_y_p % 16] +
parent_cache[4][current_y_p % 16] +
parent_cache[5][current_y_p % 16] +
parent_cache[6][current_y_p % 16] +
parent_cache[7][current_y_p % 16]);
}
}
}
}
__syncthreads();
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
}
}
template<typename inputType, typename outputType>
__global__ void _fill_tree_mean_interior_alt(const uint64_t* __restrict__ level_xz_vec,
const uint64_t* __restrict__ xz_end_vec,
const uint16_t* __restrict__ y_vec,
const inputType* __restrict__ input_particles,
const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const uint16_t* __restrict__ y_vec_tree,
outputType* __restrict__ particle_data_output,
const int z_num,
const int x_num,
const int y_num,
const int z_num_parent,
const int x_num_parent,
const int y_num_parent,
const int level) {
//
// This step is required for the interior down-sampling
//
const int z_index = blockIdx.z * blockDim.z + threadIdx.z;
const int x_index = blockIdx.x * blockDim.y + threadIdx.y;
const int block = threadIdx.z * 2 + threadIdx.y;
const int local_th = threadIdx.x;
//shared memory cache
__shared__ float parent_cache[8][16]; //16 entries needed; padding rows to 17 would help avoid shared-memory bank conflicts.
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
if((x_index >= x_num) || (z_index >= z_num) ){
return;
}
//Particles
__shared__ std::size_t global_index_begin_0[4];
__shared__ std::size_t global_index_end_0[4];
//Parent Tree Particle Cells
__shared__ std::size_t global_index_begin_p[4];
__shared__ std::size_t global_index_end_p[4];
//Interior Tree Particle Cells
__shared__ std::size_t global_index_begin_t[4];
__shared__ std::size_t global_index_end_t[4];
if(local_th == 0) {
size_t xz_start = x_index + z_index * x_num + level_xz_vec_tree[level];
global_index_begin_t[block] = xz_end_vec_tree[xz_start - 1];
global_index_end_t[block] = xz_end_vec_tree[xz_start];
}
if(local_th == 1) {
size_t xz_start = x_index + z_index * x_num + level_xz_vec[level];
global_index_begin_0[block] = xz_end_vec[xz_start - 1];
global_index_end_0[block] = xz_end_vec[xz_start];
}
if(local_th == 2) {
size_t xz_start = (x_index / 2) + (z_index / 2) * x_num_parent + level_xz_vec_tree[level - 1];
global_index_begin_p[block] = xz_end_vec_tree[xz_start - 1];
global_index_end_p[block] = xz_end_vec_tree[xz_start];
}
__syncthreads();
if((global_index_begin_0[block] == global_index_end_0[block]) && (global_index_begin_t[block] == global_index_end_t[block])){
return;
}
float scale_factor_xz = (((2*x_num_parent != x_num) && (x_index / 2)==(x_num_parent-1) ) + ((2*z_num_parent != z_num) && (z_index / 2)==(z_num_parent-1) ))*2;
if(scale_factor_xz == 0){
scale_factor_xz = 1;
}
float scale_factor_yxz = scale_factor_xz;
if((2*y_num_parent != y_num)){
scale_factor_yxz = scale_factor_xz*2;
}
int y_0, y_p, y_t;
float f_0, f_t;
__syncthreads();
//each thread grabs a particle
//from the apr
if ((global_index_begin_0[block] + local_th) < global_index_end_0[block]) {
y_0 = y_vec[global_index_begin_0[block] + local_th];
f_0 = input_particles[global_index_begin_0[block] + local_th];
} else {
y_0 = INT32_MAX;
}
__syncthreads();
//from the tree
if ((global_index_begin_t[block] + local_th) < global_index_end_t[block]) {
y_t = y_vec_tree[global_index_begin_t[block] + local_th];
f_t = particle_data_output[global_index_begin_t[block] + local_th];
} else {
y_t = INT32_MAX;
}
__syncthreads();
//parent particle (tree)
if (block == 0) {
if (( global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
y_p = y_vec_tree[global_index_begin_p[block] + local_th];
} else {
y_p = INT32_MAX;
}
}
const int block_start = y_vec_tree[global_index_begin_p[0]] / 16;
const int block_end = ((2 * y_vec_tree[max(global_index_end_p[0], (size_t)1) - 1] + 32) / 32); // "ceil( (2 * y_tree + 1) / 32 )"
int sparse_block = 0;
int sparse_block_p = 0;
int sparse_block_t = 0;
for (int y_block = block_start; y_block < block_end; ++y_block) {
__syncthreads();
//value less than current chunk, then update.
while(y_0 < (y_block * 32)) {
sparse_block++;
if ((sparse_block * 32 + global_index_begin_0[block] + local_th) < global_index_end_0[block]) {
f_0 = input_particles[sparse_block * 32 + global_index_begin_0[block] + local_th];
y_0 = y_vec[sparse_block * 32 + global_index_begin_0[block] + local_th];
} else{
y_0 = INT32_MAX;
}
}
__syncthreads();
//interior tree update
while(y_t < (y_block * 32)) {
sparse_block_t++;
if ((sparse_block_t * 32 + global_index_begin_t[block] + local_th) < global_index_end_t[block]) {
f_t = particle_data_output[sparse_block_t * 32 + global_index_begin_t[block] + local_th];
y_t = y_vec_tree[sparse_block_t * 32 + global_index_begin_t[block] + local_th];
} else{
y_t = INT32_MAX;
}
}
__syncthreads();
//update the down-sampling caches
if( y_0 < (y_block + 1) * 32 ) {
parent_cache[2*block + y_0 % 2][(y_0 / 2) % 16] = (1.0f / 8.0f) * f_0;
}
__syncthreads();
//now the interior tree nodes
if ( y_t < (y_block + 1) * 32 ) {
parent_cache[2*block + y_t % 2][(y_t / 2) % 16] = (1.0f / 8.0f) * f_t;
}
__syncthreads();
if (block == 0) {
while(y_p < ((y_block * 32) / 2)) {
sparse_block_p++;
if ((sparse_block_p * 32 + global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
y_p = y_vec_tree[sparse_block_p * 32 + global_index_begin_p[block] + local_th];
} else {
y_p = INT32_MAX;
}
}
}
__syncthreads();
if(block == 0) {
if ( y_p < ((y_block + 1) * 32) / 2 ) {
if ((sparse_block_p * 32 + global_index_begin_p[block] + local_th) < global_index_end_p[block]) {
if (y_p == (y_num_parent - 1)) {
particle_data_output[sparse_block_p * 32 + global_index_begin_p[block] + local_th] =
scale_factor_yxz * (parent_cache[0][y_p % 16] +
parent_cache[1][y_p % 16] +
parent_cache[2][y_p % 16] +
parent_cache[3][y_p % 16] +
parent_cache[4][y_p % 16] +
parent_cache[5][y_p % 16] +
parent_cache[6][y_p % 16] +
parent_cache[7][y_p % 16]);
} else {
particle_data_output[sparse_block_p * 32 + global_index_begin_p[block] + local_th] =
scale_factor_xz * (parent_cache[0][y_p % 16] +
parent_cache[1][y_p % 16] +
parent_cache[2][y_p % 16] +
parent_cache[3][y_p % 16] +
parent_cache[4][y_p % 16] +
parent_cache[5][y_p % 16] +
parent_cache[6][y_p % 16] +
parent_cache[7][y_p % 16]);
}
}
}
}
__syncthreads();
if(local_th < 16) {
parent_cache[2 * block][local_th] = 0;
parent_cache[2 * block + 1][local_th] = 0;
}
}
}
template<int blockSize_z, int blockSize_x>
__global__ void _count_ne_rows_tree_cuda(const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const int z_num,
const int x_num,
const int level,
int* __restrict__ res) {
__shared__ int local_counts[blockSize_x][blockSize_z];
local_counts[threadIdx.y][threadIdx.x] = 0;
const int z_index = blockIdx.x * blockDim.x + threadIdx.x;
if(z_index >= z_num) { return; } // out of bounds
size_t level_start = level_xz_vec_tree[level];
int x_index = threadIdx.y;
int counter = 0;
// loop over x-dimension in chunks
while( x_index < x_num ) {
size_t xz_start = z_index * x_num + x_index + level_start;
// if row is non-empty
if( xz_end_vec_tree[xz_start - 1] < xz_end_vec_tree[xz_start]) {
counter++;
}
x_index += blockDim.y;
}
__syncthreads();
local_counts[threadIdx.y][threadIdx.x] = counter;
__syncthreads();
// reduce over blockDim.y to get the count for each z_index
for(int gap = blockSize_x/2; gap > 0; gap/=2) {
if(threadIdx.y < gap) {
local_counts[threadIdx.y][threadIdx.x] += local_counts[threadIdx.y + gap][threadIdx.x];
}
__syncthreads();
}
// now reduce over blockDim.x to get the block count
for(int gap = blockSize_z/2; gap > 0; gap/=2) {
if(threadIdx.x < gap && threadIdx.y == 0) {
local_counts[0][threadIdx.x] += local_counts[0][threadIdx.x + gap];
}
__syncthreads();
}
if(threadIdx.x == 0 && threadIdx.y == 0) {
res[blockIdx.x] = local_counts[0][0];
}
}
__device__ unsigned int count = 0;
__global__ void _fill_ne_rows_tree_cuda(const uint64_t* __restrict__ level_xz_vec_tree,
const uint64_t* __restrict__ xz_end_vec_tree,
const int z_num,
const int x_num,
const int level,
unsigned int ne_count,
unsigned int offset,
int* __restrict__ ne_rows) {
const int z_index = blockIdx.x * blockDim.x + threadIdx.x;
if (z_index >= z_num) { return; } // out of bounds
size_t level_start = level_xz_vec_tree[level];
int x_index = threadIdx.y;
// loop over x-dimension in chunks
while (x_index < x_num) {
size_t xz_start = z_index * x_num + x_index + level_start;
// if row is non-empty
if( xz_end_vec_tree[xz_start - 1] < xz_end_vec_tree[xz_start]) {
unsigned int index = atomicInc(&count, ne_count-1);
ne_rows[offset + index] = z_index * x_num + x_index;
}
x_index += blockDim.y;
}
}
template<int blockSize_z, int blockSize_x>
void compute_ne_rows_tree_cuda(GPUAccessHelper& tree_access, VectorData<int>& ne_count, ScopedCudaMemHandler<int*, JUST_ALLOC>& ne_rows_gpu) {
ne_count.resize(tree_access.level_max() + 3);
int z_blocks_max = (tree_access.z_num(tree_access.level_max()) + blockSize_z - 1) / blockSize_z;
int num_levels = tree_access.level_max() - tree_access.level_min() + 1;
int block_sums_host[z_blocks_max * num_levels];
int *block_sums_device;
error_check(cudaMalloc(&block_sums_device, z_blocks_max*num_levels*sizeof(int)) )
error_check( cudaMemset(block_sums_device, 0, z_blocks_max*num_levels*sizeof(int)) )
// error_check( cudaDeviceSynchronize() )
int offset = 0;
for(int level = tree_access.level_min(); level <= tree_access.level_max(); ++level) {
int z_blocks = (tree_access.z_num(level) + blockSize_z - 1) / blockSize_z;
dim3 grid_dim(z_blocks, 1, 1);
dim3 block_dim(blockSize_z, blockSize_x, 1);
_count_ne_rows_tree_cuda<blockSize_z, blockSize_x>
<<< grid_dim, block_dim >>>
(tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.z_num(level),
tree_access.x_num(level),
level,
block_sums_device + offset);
offset += z_blocks_max;
}
error_check(cudaDeviceSynchronize())
error_check(cudaMemcpy(block_sums_host, block_sums_device, z_blocks_max * num_levels * sizeof(int), cudaMemcpyDeviceToHost) )
int counter = 0;
offset = 0;
for(int level = tree_access.level_min(); level <= tree_access.level_max(); ++level) {
ne_count[level+1] = counter;
for(int i = 0; i < z_blocks_max; ++i) {
counter += block_sums_host[offset + i];
}
offset += z_blocks_max;
}
ne_count.back() = counter;
ne_rows_gpu.initialize(NULL, counter);
for(int level = (tree_access.level_min() + 1); level <= (tree_access.level_max() + 1); ++level) {
int ne_sz = ne_count[level+1] - ne_count[level];
if( ne_sz == 0 ) {
continue;
}
int z_blocks = (tree_access.z_num(level - 1) + blockSize_z - 1) / blockSize_z;
dim3 grid_dim(z_blocks, 1, 1);
dim3 block_dim(blockSize_z, blockSize_x, 1);
_fill_ne_rows_tree_cuda<<< grid_dim, block_dim >>>
(tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
level-1,
ne_sz,
ne_count[level],
ne_rows_gpu.get());
}
error_check(cudaFree(block_sums_device) )
}
void compute_ne_rows_tree(GPUAccessHelper& tree_access, VectorData<int>& ne_counter, VectorData<int>& ne_rows) {
ne_counter.resize(tree_access.level_max() + 3);
int z = 0;
int x = 0;
uint64_t counter = 0;
for (int level = (tree_access.level_min() + 1); level <= (tree_access.level_max() + 1); ++level) {
auto level_start = tree_access.linearAccess->level_xz_vec[level - 1];
ne_counter[level] = counter;
for (z = 0; z < tree_access.z_num(level - 1); z++) {
for (x = 0; x < tree_access.x_num(level - 1); ++x) {
auto offset = x + z * tree_access.x_num(level - 1);
auto xz_start = level_start + offset;
auto begin_index = tree_access.linearAccess->xz_end_vec[xz_start - 1];
auto end_index = tree_access.linearAccess->xz_end_vec[xz_start];
if (begin_index < end_index) {
counter++;
}
}
}
}
ne_rows.resize(counter);
ne_counter.back() = ne_rows.size();
counter = 0;
for (int level = (tree_access.level_min() + 1); level <= (tree_access.level_max() + 1); ++level) {
auto level_start = tree_access.linearAccess->level_xz_vec[level - 1];
for (z = 0; z < tree_access.z_num(level - 1); z++) {
for (x = 0; x < tree_access.x_num(level - 1); ++x) {
auto offset = x + z * tree_access.x_num(level - 1);
auto xz_start = level_start + offset;
//initialize
auto begin_index = tree_access.linearAccess->xz_end_vec[xz_start - 1];
auto end_index = tree_access.linearAccess->xz_end_vec[xz_start];
if (begin_index < end_index) {
ne_rows[counter] = (x + z * tree_access.x_num(level - 1));
counter++;
}
}
}
}
}
template<typename inputType, typename treeType>
void downsample_avg(GPUAccessHelper& access, GPUAccessHelper& tree_access, inputType* input_gpu, treeType* tree_data_gpu, int* ne_rows,VectorData<int>& ne_offset) {
/// assumes input_gpu, tree_data_gpu and ne_rows are already on the device
for (int level = access.level_max(); level >= access.level_min(); --level) {
size_t ne_sz = ne_offset[level+1] - ne_offset[level];
size_t offset = ne_offset[level];
if( ne_sz == 0 ) {
continue;
}
dim3 threads_l(128, 1, 1);
dim3 blocks_l(ne_sz, 1, 1);
if(level == access.level_max()){
_fill_tree_mean_max <<< blocks_l, threads_l >>>
(access.get_level_xz_vec_ptr(),
access.get_xz_end_vec_ptr(),
access.get_y_vec_ptr(),
input_gpu,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.get_y_vec_ptr(),
tree_data_gpu,
access.z_num(level),
access.x_num(level),
access.y_num(level),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
tree_access.y_num(level-1),
level,
ne_rows + offset);
} else {
_fill_tree_mean_interior <<< blocks_l, threads_l >>>
(access.get_level_xz_vec_ptr(),
access.get_xz_end_vec_ptr(),
access.get_y_vec_ptr(),
input_gpu,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.get_y_vec_ptr(),
tree_data_gpu,
access.z_num(level),
access.x_num(level),
access.y_num(level),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
tree_access.y_num(level-1),
level,
ne_rows + offset);
}
error_check( cudaDeviceSynchronize() )
}
}
template<typename inputType, typename treeType>
void downsample_avg_alt(GPUAccessHelper& access, GPUAccessHelper& tree_access, inputType* input_gpu, treeType* tree_data_gpu) {
/// assumes that access structures, input_gpu and tree_data_gpu are already on the device
for (int level = access.level_max(); level >= access.level_min(); --level) {
int x_blocks = (access.x_num(level) + 1) / 2;
int z_blocks = (access.z_num(level) + 1) / 2;
dim3 blocks_l(x_blocks, 1, z_blocks);
dim3 threads_l(32, 2, 2);
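// 32 x 2 x 2 = 128 threads: threadIdx.x walks a 32-wide y chunk while
// (threadIdx.y, threadIdx.z) pick the child (x, z) pair sharing a parent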
if(level == access.level_max()){
_fill_tree_mean_max_alt <<< blocks_l, threads_l >>>
(access.get_level_xz_vec_ptr(),
access.get_xz_end_vec_ptr(),
access.get_y_vec_ptr(),
input_gpu,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.get_y_vec_ptr(),
tree_data_gpu,
access.z_num(level),
access.x_num(level),
access.y_num(level),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
tree_access.y_num(level-1),
level);
} else {
_fill_tree_mean_interior_alt <<< blocks_l, threads_l >>>
(access.get_level_xz_vec_ptr(),
access.get_xz_end_vec_ptr(),
access.get_y_vec_ptr(),
input_gpu,
tree_access.get_level_xz_vec_ptr(),
tree_access.get_xz_end_vec_ptr(),
tree_access.get_y_vec_ptr(),
tree_data_gpu,
access.z_num(level),
access.x_num(level),
access.y_num(level),
tree_access.z_num(level-1),
tree_access.x_num(level-1),
tree_access.y_num(level-1),
level);
}
error_check( cudaDeviceSynchronize() )
}
}
template<typename inputType, typename treeType>
void downsample_avg_alt(GPUAccessHelper& access, GPUAccessHelper& tree_access, VectorData<inputType>& input, VectorData<treeType>& tree_data) {
if(tree_data.size() != tree_access.total_number_particles()) {
tree_data.resize(tree_access.total_number_particles());
}
/// allocate GPU memory
ScopedCudaMemHandler<inputType*, JUST_ALLOC> input_gpu(input.data(), input.size());
ScopedCudaMemHandler<treeType*, JUST_ALLOC> tree_data_gpu(tree_data.data(), tree_data.size());
input_gpu.copyH2D();
downsample_avg_alt(access, tree_access, input_gpu.get(), tree_data_gpu.get());
tree_data_gpu.copyD2H();
}
template<typename inputType, typename treeType>
void downsample_avg(GPUAccessHelper& access, GPUAccessHelper& tree_access, inputType* input_gpu, treeType* tree_data_gpu) {
VectorData<int> ne_counter;
ScopedCudaMemHandler<int*, JUST_ALLOC> ne_rows_gpu;
compute_ne_rows_tree_cuda<16, 32>(tree_access, ne_counter, ne_rows_gpu);
error_check( cudaDeviceSynchronize() )
downsample_avg(access, tree_access, input_gpu, tree_data_gpu, ne_rows_gpu.get(), ne_counter);
}
template<typename inputType, typename treeType>
void downsample_avg(GPUAccessHelper& access, GPUAccessHelper& tree_access, VectorData<inputType>& input, VectorData<treeType>& tree_data) {
if(tree_data.size() != tree_access.total_number_particles()) {
tree_data.resize(tree_access.total_number_particles());
}
/// allocate GPU memory
ScopedCudaMemHandler<inputType*, JUST_ALLOC> input_gpu(input.data(), input.size());
ScopedCudaMemHandler<treeType*, JUST_ALLOC> tree_data_gpu(tree_data.data(), tree_data.size());
VectorData<int> ne_counter;
ScopedCudaMemHandler<int*, JUST_ALLOC> ne_rows_gpu;
compute_ne_rows_tree_cuda<16, 32>(tree_access, ne_counter, ne_rows_gpu);
input_gpu.copyH2D();
downsample_avg(access, tree_access, input_gpu.get(), tree_data_gpu.get(), ne_rows_gpu.get(), ne_counter);
tree_data_gpu.copyD2H();
}
/// instantiate templates
template void downsample_avg(GPUAccessHelper&, GPUAccessHelper&, VectorData<uint16_t>&, VectorData<float>&);
template void downsample_avg(GPUAccessHelper&, GPUAccessHelper&, VectorData<uint16_t>&, VectorData<double>&);
template void downsample_avg(GPUAccessHelper&, GPUAccessHelper&, VectorData<float>&, VectorData<float>&);
template void downsample_avg_alt(GPUAccessHelper&, GPUAccessHelper&, VectorData<uint16_t>&, VectorData<float>&);
template void downsample_avg_alt(GPUAccessHelper&, GPUAccessHelper&, VectorData<uint16_t>&, VectorData<double>&);
template void downsample_avg_alt(GPUAccessHelper&, GPUAccessHelper&, VectorData<float>&, VectorData<float>&);
template void compute_ne_rows_tree_cuda<8, 32>(GPUAccessHelper&, VectorData<int>&, ScopedCudaMemHandler<int*, JUST_ALLOC>&);
template void compute_ne_rows_tree_cuda<16, 32>(GPUAccessHelper&, VectorData<int>&, ScopedCudaMemHandler<int*, JUST_ALLOC>&);
template void compute_ne_rows_tree_cuda<32, 32>(GPUAccessHelper&, VectorData<int>&, ScopedCudaMemHandler<int*, JUST_ALLOC>&);
|
d2b24c064e01178f1320ef6bd5d7a8c1926b709f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* @brief: this file contains the definition of svm trainer class
* Created on: May 24, 2012
* Author: Zeyi Wen
* Copyright @DBGroup University of Melbourne
*/
#include "../svm-shared/svmTrainer.h"
#include "time.h"
#include "../svm-shared/gpu_global_utility.h"
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include <sys/time.h>
long nTimeOfLoop = 0;
long nTimeOfPrep = 0;
/*
 * @brief: train svm model. Training data consists of two parts in the n-fold cross-validation scenario;
 * @param: model: the svm model of the training
 * @param: pfDevYiFValueSubset: gradient of a subset of samples (the training samples are a subset of the whole sample set)
*/
int nInterater = 0;
bool CSVMTrainer::TrainModel(svm_model &model, float_point *pfDevYiFValueSubset,
float_point *pfDevAlphaSubset, int *pnDevLabelSubset,
int nNumofInstance, float_point *pfP)
{
bool bReturn = true;
assert(nNumofInstance > 0 && pfDevAlphaSubset != NULL &&
pfDevYiFValueSubset != NULL && pnDevLabelSubset != NULL);
/*************** prepare to perform training *************/
TrainStarting(nNumofInstance, nNumofInstance, pfDevYiFValueSubset, pfDevAlphaSubset, pnDevLabelSubset);
//start training process
int nIter = 0;
int nMaxIter = (nNumofInstance > INT_MAX / ITERATION_FACTOR ? INT_MAX : ITERATION_FACTOR * nNumofInstance) * 4;
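	// Note: the ternary guards ITERATION_FACTOR * nNumofInstance against int overflow,
	// but the trailing "* 4" can still wrap when the INT_MAX branch is taken
	// (INT_MAX * 4 overflows a 32-bit int). A minimal sketch of a safer bound,
	// assuming 4 * ITERATION_FACTOR itself fits in an int, would be:
	//   int nMaxIter = (nNumofInstance > INT_MAX / (4 * ITERATION_FACTOR))
	//                      ? INT_MAX : 4 * ITERATION_FACTOR * nNumofInstance;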
int nSelectFirstSample = -1;
int nSelectSecondeSample = -1;
timespec timeLoopS, timeLoopE;
clock_gettime(CLOCK_REALTIME, &timeLoopS);
while(nIter < nMaxIter)
{
int nEnd = m_pSMOSolver->Iterate(pfDevYiFValueSubset, pfDevAlphaSubset, pnDevLabelSubset, nNumofInstance);
if(nEnd == 1)
{
cout << " Done" << endl;
break;
}
if(nIter % 1000 == 0 && nIter != 0)
{
cout << ".";
cout.flush();
}
nIter++;
}
clock_gettime(CLOCK_REALTIME, &timeLoopE);
long lTempLoop = ((timeLoopE.tv_sec - timeLoopS.tv_sec) * 1e9 + (timeLoopE.tv_nsec - timeLoopS.tv_nsec));
if(lTempLoop > 0)
nTimeOfLoop += lTempLoop;
else
cout << "loop timer error" << endl;
TrainEnding(nIter, nNumofInstance, nNumofInstance, model,
pnDevLabelSubset, pfDevAlphaSubset, pfDevYiFValueSubset, pfP);
	//can't find an optimal classifier
if(nIter == nMaxIter)
{
bReturn = false;
}
return bReturn;
}
| d2b24c064e01178f1320ef6bd5d7a8c1926b709f.cu | /*
* @brief: this file contains the definition of svm trainer class
* Created on: May 24, 2012
* Author: Zeyi Wen
* Copyright @DBGroup University of Melbourne
*/
#include "../svm-shared/svmTrainer.h"
#include "time.h"
#include "../svm-shared/gpu_global_utility.h"
#include <cuda.h>
#include <helper_cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_profiler_api.h>
#include <sys/time.h>
long nTimeOfLoop = 0;
long nTimeOfPrep = 0;
/*
 * @brief: train svm model. Training data consists of two parts in the n-fold cross-validation scenario;
 * @param: model: the svm model of the training
 * @param: pfDevYiFValueSubset: gradient of a subset of samples (the training samples are a subset of the whole sample set)
*/
int nInterater = 0;
bool CSVMTrainer::TrainModel(svm_model &model, float_point *pfDevYiFValueSubset,
float_point *pfDevAlphaSubset, int *pnDevLabelSubset,
int nNumofInstance, float_point *pfP)
{
bool bReturn = true;
assert(nNumofInstance > 0 && pfDevAlphaSubset != NULL &&
pfDevYiFValueSubset != NULL && pnDevLabelSubset != NULL);
/*************** prepare to perform training *************/
TrainStarting(nNumofInstance, nNumofInstance, pfDevYiFValueSubset, pfDevAlphaSubset, pnDevLabelSubset);
//start training process
int nIter = 0;
int nMaxIter = (nNumofInstance > INT_MAX / ITERATION_FACTOR ? INT_MAX : ITERATION_FACTOR * nNumofInstance) * 4;
int nSelectFirstSample = -1;
int nSelectSecondeSample = -1;
timespec timeLoopS, timeLoopE;
clock_gettime(CLOCK_REALTIME, &timeLoopS);
while(nIter < nMaxIter)
{
int nEnd = m_pSMOSolver->Iterate(pfDevYiFValueSubset, pfDevAlphaSubset, pnDevLabelSubset, nNumofInstance);
if(nEnd == 1)
{
cout << " Done" << endl;
break;
}
if(nIter % 1000 == 0 && nIter != 0)
{
cout << ".";
cout.flush();
}
nIter++;
}
clock_gettime(CLOCK_REALTIME, &timeLoopE);
long lTempLoop = ((timeLoopE.tv_sec - timeLoopS.tv_sec) * 1e9 + (timeLoopE.tv_nsec - timeLoopS.tv_nsec));
if(lTempLoop > 0)
nTimeOfLoop += lTempLoop;
else
cout << "loop timer error" << endl;
TrainEnding(nIter, nNumofInstance, nNumofInstance, model,
pnDevLabelSubset, pfDevAlphaSubset, pfDevYiFValueSubset, pfP);
	//can't find an optimal classifier
if(nIter == nMaxIter)
{
bReturn = false;
}
return bReturn;
}
|
f8ba170b60df65856e2df7170f593e2d521eeacc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/hip/detail/TensorInfo.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/hip/ScanUtils.cuh>
#include <ATen/native/Resize.h>
#include <ATen/native/hip/SortingCommon.cuh>
#include <ATen/native/hip/SortingRadixSelect.cuh>
#include <ATen/native/hip/SortUtils.cuh>
#include <THH/THHTensorMathReduce.cuh> // for AddOp
#include <c10/macros/Macros.h>
using namespace at::native;
namespace at {
namespace native {
namespace {
template <typename T, typename IndexType, int Dim, bool Order>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input,
IndexType inputSliceSize,
IndexType outputSliceSize, // aka `k`
IndexType numInputSlices,
IndexType inputWithinSliceStride,
at::cuda::detail::TensorInfo<T, IndexType> topK,
IndexType numTopKSlices,
IndexType topKWithinSliceStride,
at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
IndexType indicesWithinSliceStride) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of IndexType
#if defined(USE_ROCM)
__shared__ int smem[64];
#else
__shared__ int smem[32]; // one per each warp, up to warp limit
#endif
IndexType slice = getLinearBlockId<IndexType>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
IndexType sliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input);
IndexType topKSliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK);
IndexType indicesSliceStartIndex =
at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices);
T* inputSliceStart = &input.data[sliceStartIndex];
T* topKSliceStart = &topK.data[topKSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
T topKValue = static_cast<T>(0);
radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType, Order>(
inputSliceStart, outputSliceSize,
inputSliceSize, inputWithinSliceStride,
smem, &topKValue);
const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue);
// Every value that is strictly less/greater than `pattern`
// (depending on sort dir) in sorted int format is in the top-K.
// The top-K value itself might not be unique.
//
// Since there are a variable number of elements that we see that
// are within the top-k, we don't know at what index to write out
// the resulting values.
// In order to get this, we perform an exclusive prefix sum of
// `hasTopK`. This will return the resulting index into which we
// need to write the result, if a thread has a result.
// All threads need to participate in the loop and the prefix sum,
// but not necessarily in the load; hence loop bounds being rounded
// up to a multiple of the block dim.
IndexType numIterations = round_up(inputSliceSize, (IndexType) blockDim.x);
IndexType writeIndexStart = 0;
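  // Worked example of the scan semantics with a hypothetical 8-thread block: if
  // hasTopK across threads 0..7 comes out as [1,0,1,1,0,0,1,0], the exclusive scan
  // yields index = [0,1,1,2,3,3,3,4] (the count of earlier true flags) and
  // carry = 4 (the block-wide total), so the four qualifying threads write to
  // writeIndexStart + {0,1,2,3} and writeIndexStart then advances by 4.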
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK;
if (Order) {
hasTopK = inRange && (convertedV > topKConverted);
} else {
hasTopK = inRange && (convertedV < topKConverted);
}
int index;
int carry;
at::cuda::exclusiveBinaryPrefixScan<int, true>(
smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
writeIndexStart += carry;
}
// We need to fill in the rest with actual == top-K values.
// The number that we need is outputSliceSize -
// writeIndexStart. There might be more than that number available,
// in which case we have to choose the first seen set. We do this
// via a prefix sum to calculate indices for writing results.
CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart);
IndexType topKRemaining = (outputSliceSize - writeIndexStart);
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK = inRange && (convertedV == topKConverted);
int index;
int carry;
at::cuda::exclusiveBinaryPrefixScan<int, true>(
smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK && index < topKRemaining) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
if (carry >= topKRemaining) {
break;
}
topKRemaining -= carry;
writeIndexStart += carry;
}
};
} // namespace
TORCH_IMPL_FUNC(topk_out_cuda)
(const Tensor& self,
int64_t k, int64_t dim, bool largest, bool sorted,
const Tensor& values,
const Tensor& indices) {
TensorArg topK_arg{values, "topK", 1}, indices_arg{indices, "indices", 2}, input_arg{self, "self", 3};
checkAllSameGPU(__func__, {topK_arg, indices_arg, input_arg});
dim = at::maybe_wrap_dim(dim, self);
int numDims = self.dim();
numDims = numDims == 0 ? 1 : numDims;
TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions");
int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim);
Tensor input = self.contiguous();
// If k is 0 the result is an empty tensor, so we don't need to launch a kernel.
if (k == 0) {
return;
}
// static_cast is required to ensure that the correct type (INDEX_T)
// is provided to the kernel for the arguments.
#define RUN_K(INDEX_T, DIM, DIR) \
hipLaunchKernelGGL(( gatherTopK<scalar_t, INDEX_T, DIM, DIR>) \
, dim3(grid), dim3(block), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \
inputInfo, \
static_cast<INDEX_T>(sliceSize), \
static_cast<INDEX_T>(k), \
static_cast<INDEX_T>(inputSlices), \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \
topKInfo, \
static_cast<INDEX_T>(topKSlices), \
static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \
indicesInfo, \
static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); \
C10_HIP_KERNEL_LAUNCH_CHECK();
#define RUN_DIR(INDEX_T, DIM) \
if (largest) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \
at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \
at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \
/* tensorInfoLegacyIfScalar*/ \
if (!input.dim()) { \
inputInfo.dims = 1; \
inputInfo.sizes[0] = 1; \
inputInfo.strides[0] = 1; \
topKInfo.dims = 1; \
topKInfo.sizes[0] = 1; \
topKInfo.strides[0] = 1; \
indicesInfo.dims = 1; \
indicesInfo.sizes[0] = 1; \
indicesInfo.strides[0] = 1; \
} \
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
/* stash the stride of dim because it can be accidentally collapsed */ \
auto strideTopK = topKInfo.strides[dim]; \
auto strideIndices = indicesInfo.strides[dim]; \
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
/* restore stride in case it was collapsed */ \
topKInfo.strides[collapseTopKDim] = strideTopK; \
indicesInfo.strides[collapseIndicesDim] = strideIndices; \
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
TORCH_INTERNAL_ASSERT(getGridFromTiles(inputSlices, grid), "Too many slices to sort"); \
\
dim3 block(::min(at::ceil_div(sliceSize, (int64_t) C10_WARP_SIZE)*(int64_t) C10_WARP_SIZE, (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T); \
});
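// Illustrative dispatch path (hypothetical shapes; not asserting the exact outcome of
// collapseDims): if all three TensorInfo structures collapse to the same rank d in
// {1,2,3}, RUN_DIM picks the specialized template, e.g. gatherTopK<float, uint32_t, 2, true>
// for a 32-bit-indexable float tensor with largest=true; any rank mismatch falls back
// to the generic DIM = -1 instantiation.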
// the below is safe with 0-dimensional tensors because it is based on
// TensorInfo which implicitly expands to 1-dimensional.
if (input.numel() > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (at::cuda::detail::canUse32BitIndexMath(input) &&
at::cuda::detail::canUse32BitIndexMath(values) &&
at::cuda::detail::canUse32BitIndexMath(indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted && values.numel() > 1) {
if (should_use_small_sort(values, dim)) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
sortKeyValueInplace(values, indices, dim, largest);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
Tensor sortedIndices = at::empty_like(indices);
Tensor sortedValues = at::empty_like(values);
sort_out_cuda(values, dim, largest, sortedValues, sortedIndices);
indices.copy_(indices.gather(dim, sortedIndices));
values.copy_(sortedValues);
}
}
}
} // at::native
} // at
| f8ba170b60df65856e2df7170f593e2d521eeacc.cu | #include <ATen/ATen.h>
#include <ATen/ceil_div.h>
#include <ATen/cuda/detail/TensorInfo.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/cuda/ScanUtils.cuh>
#include <ATen/native/Resize.h>
#include <ATen/native/cuda/SortingCommon.cuh>
#include <ATen/native/cuda/SortingRadixSelect.cuh>
#include <ATen/native/cuda/SortUtils.cuh>
#include <THC/THCTensorMathReduce.cuh> // for AddOp
#include <c10/macros/Macros.h>
using namespace at::native;
namespace at {
namespace native {
namespace {
template <typename T, typename IndexType, int Dim, bool Order>
C10_LAUNCH_BOUNDS_1(1024)
__global__ void gatherTopK(at::cuda::detail::TensorInfo<T, IndexType> input,
IndexType inputSliceSize,
IndexType outputSliceSize, // aka `k`
IndexType numInputSlices,
IndexType inputWithinSliceStride,
at::cuda::detail::TensorInfo<T, IndexType> topK,
IndexType numTopKSlices,
IndexType topKWithinSliceStride,
at::cuda::detail::TensorInfo<int64_t, IndexType> indices,
IndexType indicesWithinSliceStride) {
// Indices are limited to integer fp precision, so counts can fit in
// int32, regardless of IndexType
#if defined(USE_ROCM)
__shared__ int smem[64];
#else
__shared__ int smem[32]; // one per each warp, up to warp limit
#endif
IndexType slice = getLinearBlockId<IndexType>();
if (slice >= numInputSlices) {
return;
}
// Find the start offset for our slice
IndexType sliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, input);
IndexType topKSliceStartIndex =
at::cuda::detail::IndexToOffset<T, IndexType, Dim>::get(slice, topK);
IndexType indicesSliceStartIndex =
at::cuda::detail::IndexToOffset<int64_t, IndexType, Dim>::get(slice, indices);
T* inputSliceStart = &input.data[sliceStartIndex];
T* topKSliceStart = &topK.data[topKSliceStartIndex];
int64_t* indicesSliceStart = &indices.data[indicesSliceStartIndex];
// Find the k-th highest element in our input
T topKValue = static_cast<T>(0);
radixSelect<T, typename TopKTypeConfig<T>::RadixType, IndexType, Order>(
inputSliceStart, outputSliceSize,
inputSliceSize, inputWithinSliceStride,
smem, &topKValue);
const auto topKConverted = at::native::TopKTypeConfig<T>::convert(topKValue);
// Every value that is strictly less/greater than `pattern`
// (depending on sort dir) in sorted int format is in the top-K.
// The top-K value itself might not be unique.
//
// Since there are a variable number of elements that we see that
// are within the top-k, we don't know at what index to write out
// the resulting values.
// In order to get this, we perform an exclusive prefix sum of
// `hasTopK`. This will return the resulting index into which we
// need to write the result, if a thread has a result.
// All threads need to participate in the loop and the prefix sum,
// but not necessarily in the load; hence loop bounds being rounded
// up to a multiple of the block dim.
IndexType numIterations = round_up(inputSliceSize, (IndexType) blockDim.x);
IndexType writeIndexStart = 0;
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK;
if (Order) {
hasTopK = inRange && (convertedV > topKConverted);
} else {
hasTopK = inRange && (convertedV < topKConverted);
}
int index;
int carry;
at::cuda::exclusiveBinaryPrefixScan<int, true>(
smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
writeIndexStart += carry;
}
// We need to fill in the rest with actual == top-K values.
// The number that we need is outputSliceSize -
// writeIndexStart. There might be more than that number available,
// in which case we have to choose the first seen set. We do this
// via a prefix sum to calculate indices for writing results.
CUDA_KERNEL_ASSERT(outputSliceSize >= writeIndexStart);
IndexType topKRemaining = (outputSliceSize - writeIndexStart);
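  // Worked example with hypothetical counts: if k = outputSliceSize = 5 and the strict
  // pass above wrote writeIndexStart = 3 values, then topKRemaining = 2, so only the
  // first two threads whose value compares equal to the top-k pattern (scan index < 2)
  // write in this pass; once the accumulated carry reaches 2, later iterations break out.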
for (IndexType i = threadIdx.x; i < numIterations; i += blockDim.x) {
bool inRange = (i < inputSliceSize);
T v =
inRange ? doLdg(&inputSliceStart[i * inputWithinSliceStride]) : static_cast<T>(0);
const auto convertedV = at::native::TopKTypeConfig<T>::convert(v);
bool hasTopK = inRange && (convertedV == topKConverted);
int index;
int carry;
at::cuda::exclusiveBinaryPrefixScan<int, true>(
smem, hasTopK, &index, &carry, AddOp<int>());
if (hasTopK && index < topKRemaining) {
int writeIndex = writeIndexStart + index;
CUDA_KERNEL_ASSERT(writeIndex < outputSliceSize);
IndexType topKOffset = writeIndex * topKWithinSliceStride;
IndexType indexOffset = writeIndex * indicesWithinSliceStride;
topKSliceStart[topKOffset] = v;
indicesSliceStart[indexOffset] = i;
}
if (carry >= topKRemaining) {
break;
}
topKRemaining -= carry;
writeIndexStart += carry;
}
};
} // namespace
TORCH_IMPL_FUNC(topk_out_cuda)
(const Tensor& self,
int64_t k, int64_t dim, bool largest, bool sorted,
const Tensor& values,
const Tensor& indices) {
TensorArg topK_arg{values, "topK", 1}, indices_arg{indices, "indices", 2}, input_arg{self, "self", 3};
checkAllSameGPU(__func__, {topK_arg, indices_arg, input_arg});
dim = at::maybe_wrap_dim(dim, self);
int numDims = self.dim();
numDims = numDims == 0 ? 1 : numDims;
TORCH_CHECK(numDims <= MAX_DIMS, "input tensor has too many dimensions");
int64_t sliceSize = self.dim() == 0 ? 1 : self.size(dim);
Tensor input = self.contiguous();
// If k is 0 the result is an empty tensor, so we don't need to launch a kernel.
if (k == 0) {
return;
}
// static_cast is required to ensure that the correct type (INDEX_T)
// is provided to the kernel for the arguments.
#define RUN_K(INDEX_T, DIM, DIR) \
gatherTopK<scalar_t, INDEX_T, DIM, DIR> \
<<<grid, block, 0, c10::cuda::getCurrentCUDAStream()>>>( \
inputInfo, \
static_cast<INDEX_T>(sliceSize), \
static_cast<INDEX_T>(k), \
static_cast<INDEX_T>(inputSlices), \
/* The actual dimension that the k-selection is running in */ \
/* may have changed from collapseDims() */ \
static_cast<INDEX_T>(inputInfo.strides[collapseInputDim]), \
topKInfo, \
static_cast<INDEX_T>(topKSlices), \
static_cast<INDEX_T>(topKInfo.strides[collapseTopKDim]), \
indicesInfo, \
static_cast<INDEX_T>(indicesInfo.strides[collapseIndicesDim])); \
C10_CUDA_KERNEL_LAUNCH_CHECK();
#define RUN_DIR(INDEX_T, DIM) \
if (largest) { \
RUN_K(INDEX_T, DIM, true); \
} else { \
RUN_K(INDEX_T, DIM, false); \
}
#define RUN_DIM(INDEX_T) \
if (allDims == 1) { \
RUN_DIR(INDEX_T, 1); \
} else if (allDims == 2) { \
RUN_DIR(INDEX_T, 2); \
} else if (allDims == 3) { \
RUN_DIR(INDEX_T, 3); \
} else { \
RUN_DIR(INDEX_T, -1); \
}
#define RUN_T(INDEX_T) \
AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "topk_out_cuda", [&] { \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> inputInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(input); \
at::cuda::detail::TensorInfo<scalar_t, INDEX_T> topKInfo = \
at::cuda::detail::getTensorInfo<scalar_t, INDEX_T>(values); \
at::cuda::detail::TensorInfo<int64_t, INDEX_T> indicesInfo = \
at::cuda::detail::getTensorInfo<int64_t, INDEX_T>(indices); \
/* tensorInfoLegacyIfScalar*/ \
if (!input.dim()) { \
inputInfo.dims = 1; \
inputInfo.sizes[0] = 1; \
inputInfo.strides[0] = 1; \
topKInfo.dims = 1; \
topKInfo.sizes[0] = 1; \
topKInfo.strides[0] = 1; \
indicesInfo.dims = 1; \
indicesInfo.sizes[0] = 1; \
indicesInfo.strides[0] = 1; \
} \
/* We use these structures solely to find the offset to */ \
/* each slice we are operating on */ \
inputInfo.sizes[dim] = 1; \
topKInfo.sizes[dim] = 1; \
indicesInfo.sizes[dim] = 1; \
/* stash the stride of dim because it can be accidentally collapsed */ \
auto strideTopK = topKInfo.strides[dim]; \
auto strideIndices = indicesInfo.strides[dim]; \
/* Collapse all other dims */ \
int collapseInputDim = inputInfo.collapseDims(dim); \
int collapseTopKDim = topKInfo.collapseDims(dim); \
int collapseIndicesDim = indicesInfo.collapseDims(dim); \
/* restore stride in case it was collapsed */ \
topKInfo.strides[collapseTopKDim] = strideTopK; \
indicesInfo.strides[collapseIndicesDim] = strideIndices; \
int64_t inputSlices = 1; \
for (int i = 0; i < inputInfo.dims; ++i) { \
inputSlices *= inputInfo.sizes[i]; \
} \
int64_t topKSlices = 1; \
for (int i = 0; i < topKInfo.dims; ++i) { \
topKSlices *= topKInfo.sizes[i]; \
} \
\
dim3 grid; \
TORCH_INTERNAL_ASSERT(getGridFromTiles(inputSlices, grid), "Too many slices to sort"); \
\
dim3 block(std::min(at::ceil_div(sliceSize, (int64_t) C10_WARP_SIZE)*(int64_t) C10_WARP_SIZE, (int64_t) 1024)); \
\
/* This is used as a template parameter to calculate indices. */ \
/* We only specialize it if all collapsed dim sizes are the */ \
/* same; otherwise, we use -1 which is the specialization */ \
/* parameter for arbitrary dimensions */ \
int allDims = inputInfo.dims; \
if (topKInfo.dims != allDims || indicesInfo.dims != allDims) { \
allDims = -1; \
} \
\
RUN_DIM(INDEX_T); \
});
// the below is safe with 0-dimensional tensors because it is based on
// TensorInfo which implicitly expands to 1-dimensional.
if (input.numel() > 0) {
// Based on required index size, run the algorithm with the
// appropriate index type
if (at::cuda::detail::canUse32BitIndexMath(input) &&
at::cuda::detail::canUse32BitIndexMath(values) &&
at::cuda::detail::canUse32BitIndexMath(indices)) {
RUN_T(uint32_t);
} else {
RUN_T(uint64_t);
}
}
#undef RUN_T
#undef RUN_DIM
#undef RUN_DIR
#undef RUN_K
// Sort the results if the user wants them sorted, since our
// selection routine does not ensure sorting
if (sorted && values.numel() > 1) {
if (should_use_small_sort(values, dim)) {
// This avoids any memory allocations and performs all sorting
// work inplace along the slice
sortKeyValueInplace(values, indices, dim, largest);
} else {
// Depend upon the backup sort that returns indices, which we
// can use in conjunction with gather to produce the original
// indices.
// This is not the most efficient implementation, especially since
// there are memory allocations performed here. If the user desires
// greater performance, they should torch.gather() the results
// themselves using the reported indices, providing previously
// allocated tensors to receive the results.
Tensor sortedIndices = at::empty_like(indices);
Tensor sortedValues = at::empty_like(values);
sort_out_cuda(values, dim, largest, sortedValues, sortedIndices);
indices.copy_(indices.gather(dim, sortedIndices));
values.copy_(sortedValues);
}
}
}
} // at::native
} // at
|
1b16ace6da0b026e9ae1ed8db57d438922623665.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
#ifdef __EMSCRIPTEN__
#include <cuda/emcuda.h>
struct KernelParams {
int numArgs;
int typeArgs[4];
float *A;
float *B;
float *C;
int numElements;
};
#define STRINGIGY(a) #a
const char * vectorAdd = STRINGIGY(
#endif
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
#ifdef __EMSCRIPTEN__
);
#endif
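// Portability note on the block above: under __EMSCRIPTEN__ the kernel body is captured
// as a C string via the #a stringizing macro and later handed to cudaRunKernelFunc for
// runtime compilation, while a native build compiles the very same text as an ordinary
// __global__ function -- one kernel source, two compilation paths.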
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
#ifdef __EMSCRIPTEN__
struct KernelParams funcParams;
funcParams.numArgs = 4;
funcParams.typeArgs[0] = CUDA_POINTER;
funcParams.typeArgs[1] = CUDA_POINTER;
funcParams.typeArgs[2] = CUDA_POINTER;
funcParams.typeArgs[3] = CUDA_INT;
funcParams.A = d_A;
funcParams.B = d_B;
funcParams.C = d_C;
funcParams.numElements = numElements;
cudaRunKernelFunc("vectorAdd", vectorAdd, "", blocksPerGrid, threadsPerBlock, funcParams);
#else
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements);
#endif
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
| 1b16ace6da0b026e9ae1ed8db57d438922623665.cu | /**
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
* Vector addition: C = A + B.
*
* This sample is a very basic sample that implements element by element
* vector addition. It is the same as the sample illustrating Chapter 2
* of the programming guide with some additions like error checking.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*/
#ifdef __EMSCRIPTEN__
#include <cuda/emcuda.h>
struct KernelParams {
int numArgs;
int typeArgs[4];
float *A;
float *B;
float *C;
int numElements;
};
#define STRINGIGY(a) #a
const char * vectorAdd = STRINGIGY(
#endif
__global__ void
vectorAdd(const float *A, const float *B, float *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] + B[i];
}
}
#ifdef __EMSCRIPTEN__
);
#endif
/**
* Host main routine
*/
int
main(void)
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
int numElements = 50000;
size_t size = numElements * sizeof(float);
printf("[Vector addition of %d elements]\n", numElements);
// Allocate the host input vector A
float *h_A = (float *)malloc(size);
// Allocate the host input vector B
float *h_B = (float *)malloc(size);
// Allocate the host output vector C
float *h_C = (float *)malloc(size);
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
for (int i = 0; i < numElements; ++i)
{
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = rand()/(float)RAND_MAX;
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 256;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
#ifdef __EMSCRIPTEN__
struct KernelParams funcParams;
funcParams.numArgs = 4;
funcParams.typeArgs[0] = CUDA_POINTER;
funcParams.typeArgs[1] = CUDA_POINTER;
funcParams.typeArgs[2] = CUDA_POINTER;
funcParams.typeArgs[3] = CUDA_INT;
funcParams.A = d_A;
funcParams.B = d_B;
funcParams.C = d_C;
funcParams.numElements = numElements;
cudaRunKernelFunc("vectorAdd", vectorAdd, "", blocksPerGrid, threadsPerBlock, funcParams);
#else
vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
#endif
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Verify that the result vector is correct
for (int i = 0; i < numElements; ++i)
{
if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Done\n");
return 0;
}
|
c7b5eef86695f82ce8309a22fd515c39a66973a7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* inference-101
*/
#include "cudaYUV.h"
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
rgb_to_y(r, g, b, y);
u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
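// A quick sanity check of the integer fixed-point math above (channels in [0, 255]):
// for pure white, r = g = b = 255, y = (30 + 59 + 11) * 255 / 100 = 255, while
// u = ((-17 - 33 + 50) * 255 + 12800) / 100 = 128 and v = ((50 - 42 - 8) * 255 + 12800) / 100 = 128,
// i.e. neutral chroma as expected; the +12800 term is the usual 128 offset pre-scaled by 100.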
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
uint8_t* v_plane;
if( formatYV12 )
{
u_plane = y_plane + planeSize;
v_plane = u_plane + (planeSize / 4); // size of U & V planes is 25% of Y plane
}
else
{
v_plane = y_plane + planeSize; // in I420, order of U & V planes is reversed
u_plane = v_plane + (planeSize / 4);
}
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
u_plane[uvIndex] = u_val;
v_plane[uvIndex] = v_val;
}
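// Memory layout sketch for a hypothetical 4x4 frame with dstPitch = 4: planeSize = 16,
// so dst holds a 16-byte luma plane followed by two 4-byte chroma planes (planeSize / 4
// each), 24 bytes total = 1.5 bytes per pixel; the formatYV12 branch above only swaps
// the order of the two quarter-size chroma planes.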
template<typename T, bool formatYV12>
hipError_t launch420( T* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height)
{
if( !input || !inputPitch || !output || !outputPitch || !width || !height )
return hipErrorInvalidValue;
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
const int inputAlignedWidth = inputPitch / sizeof(T);
hipLaunchKernelGGL(( RGB_to_YV12<T, formatYV12>), dim3(grid), dim3(block), 0, 0, input, inputAlignedWidth, output, outputPitch, width, height);
return CUDA(hipGetLastError());
}
// cudaRGBAToYV12
hipError_t cudaRGBAToYV12( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,false>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToYV12
hipError_t cudaRGBAToYV12( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToYV12( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
// cudaRGBAToI420
hipError_t cudaRGBAToI420( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,true>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToI420
hipError_t cudaRGBAToI420( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToI420( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
| c7b5eef86695f82ce8309a22fd515c39a66973a7.cu | /*
* inference-101
*/
#include "cudaYUV.h"
inline __device__ void rgb_to_y(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y)
{
y = static_cast<uint8_t>(((int)(30 * r) + (int)(59 * g) + (int)(11 * b)) / 100);
}
inline __device__ void rgb_to_yuv(const uint8_t r, const uint8_t g, const uint8_t b, uint8_t& y, uint8_t& u, uint8_t& v)
{
rgb_to_y(r, g, b, y);
u = static_cast<uint8_t>(((int)(-17 * r) - (int)(33 * g) + (int)(50 * b) + 12800) / 100);
v = static_cast<uint8_t>(((int)(50 * r) - (int)(42 * g) - (int)(8 * b) + 12800) / 100);
}
template <typename T, bool formatYV12>
__global__ void RGB_to_YV12( T* src, int srcAlignedWidth, uint8_t* dst, int dstPitch, int width, int height )
{
const int x = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
const int y = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
const int x1 = x + 1;
const int y1 = y + 1;
if( x1 >= width || y1 >= height )
return;
const int planeSize = height * dstPitch;
uint8_t* y_plane = dst;
uint8_t* u_plane;
uint8_t* v_plane;
if( formatYV12 )
{
u_plane = y_plane + planeSize;
v_plane = u_plane + (planeSize / 4); // size of U & V planes is 25% of Y plane
}
else
{
v_plane = y_plane + planeSize; // in I420, order of U & V planes is reversed
u_plane = v_plane + (planeSize / 4);
}
T px;
uint8_t y_val, u_val, v_val;
px = src[y * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x] = y_val;
px = src[y * srcAlignedWidth + x1];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y * dstPitch + x1] = y_val;
px = src[y1 * srcAlignedWidth + x];
rgb_to_y(px.x, px.y, px.z, y_val);
y_plane[y1 * dstPitch + x] = y_val;
px = src[y1 * srcAlignedWidth + x1];
rgb_to_yuv(px.x, px.y, px.z, y_val, u_val, v_val);
y_plane[y1 * dstPitch + x1] = y_val;
const int uvPitch = dstPitch / 2;
const int uvIndex = (y / 2) * uvPitch + (x / 2);
u_plane[uvIndex] = u_val;
v_plane[uvIndex] = v_val;
}
template<typename T, bool formatYV12>
cudaError_t launch420( T* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height)
{
if( !input || !inputPitch || !output || !outputPitch || !width || !height )
return cudaErrorInvalidValue;
const dim3 block(32, 8);
const dim3 grid(iDivUp(width, block.x * 2), iDivUp(height, block.y * 2));
const int inputAlignedWidth = inputPitch / sizeof(T);
RGB_to_YV12<T, formatYV12><<<grid, block>>>(input, inputAlignedWidth, output, outputPitch, width, height);
return CUDA(cudaGetLastError());
}
// cudaRGBAToYV12
cudaError_t cudaRGBAToYV12( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,false>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToYV12
cudaError_t cudaRGBAToYV12( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToYV12( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
// cudaRGBAToI420
cudaError_t cudaRGBAToI420( uchar4* input, size_t inputPitch, uint8_t* output, size_t outputPitch, size_t width, size_t height )
{
return launch420<uchar4,true>( input, inputPitch, output, outputPitch, width, height );
}
// cudaRGBAToI420
cudaError_t cudaRGBAToI420( uchar4* input, uint8_t* output, size_t width, size_t height )
{
return cudaRGBAToI420( input, width * sizeof(uchar4), output, width * sizeof(uint8_t), width, height );
}
|
c49bf441a771fa59908bee5a82e2921c8e47c1d7.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <inttypes.h>
#include <math.h>
#include "utils.hpp"
#include "update.hpp"
#include "cuStinger.hpp"
#include "algs.cuh"
#include "static_breadth_first_search/bfs_top_down.cuh"
// #include "static_breadth_first_search/bfs_bottom_up.cuh"
// #include "static_breadth_first_search/bfs_hybrid.cuh"
#include "static_connected_components/cc.cuh"
#include "static_page_rank/pr.cuh"
#include "static_betwenness_centrality/bc.cuh"
using namespace cuStingerAlgs;
#define CUDA(call, ...) do { \
hipError_t _e = (call); \
if (_e == hipSuccess) break; \
fprintf(stdout, \
"CUDA runtime error: %s (%d)\n", \
hipGetErrorString(_e), _e); \
return -1; \
} while (0)
int main(const int argc, char *argv[]){
int device=0;
hipSetDevice(device);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, device);
length_t nv, ne,*off;
vertexId_t *adj;
bool isDimacs,isSNAP,isRmat=false,isMarket;
string filename(argv[1]);
isDimacs = filename.find(".graph")==std::string::npos?false:true;
isSNAP = filename.find(".txt")==std::string::npos?false:true;
isRmat = filename.find("kron")==std::string::npos?false:true;
isMarket = filename.find(".mtx")==std::string::npos?false:true;
if(isDimacs){
readGraphDIMACS(argv[1],&off,&adj,&nv,&ne,isRmat);
}
else if(isSNAP){
readGraphSNAP(argv[1],&off,&adj,&nv,&ne,isRmat);
}
else if(isMarket){
readGraphMatrixMarket(argv[1],&off,&adj,&nv,&ne,(isRmat)?false:true);
}
else{
cout << "Unknown graph type" << endl;
}
cout << "Vertices: " << nv << " Edges: " << ne << endl;
hipEvent_t ce_start,ce_stop;
cuStinger custing(defaultInitAllocater,defaultUpdateAllocater);
cuStingerInitConfig hipInit;
hipInit.initState =eInitStateCSR;
hipInit.maxNV = nv+1;
hipInit.useVWeight = false;
hipInit.isSemantic = false; // Use edge types and vertex types
hipInit.useEWeight = false;
// CSR data
hipInit.csrNV = nv;
hipInit.csrNE = ne;
hipInit.csrOff = off;
hipInit.csrAdj = adj;
hipInit.csrVW = NULL;
hipInit.csrEW = NULL;
custing.initializeCuStinger(hipInit);
float totalTime;
ccBaseline scc;
scc.Init(custing);
scc.Reset();
start_clock(ce_start, ce_stop);
// scc.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
scc.Release();
ccConcurrent scc2;
scc2.Init(custing);
scc2.Reset();
start_clock(ce_start, ce_stop);
// scc2.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc2.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc2.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
scc2.Release();
ccConcurrentLB scc3;
scc3.Init(custing);
scc3.Reset();
start_clock(ce_start, ce_stop);
scc3.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of iterations : " << scc3.GetIterationCount() << endl;
cout << "The number of connected-compoents : " << scc3.CountConnectComponents(custing) << endl;
cout << "Total time for connected-compoents : " << totalTime << endl;
scc3.Release();
// ccConcurrentOptimized scc4;
// scc4.Init(custing);
// scc4.Reset();
// start_clock(ce_start, ce_stop);
// scc4.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc4.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc4.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
// scc4.Release();
// Finding largest vertex
vertexId_t maxV=0;
length_t maxLen=0;
for(int v=1; v<nv;v++){
if((off[v+1]-off[v])>maxLen){
maxV=v;
maxLen=off[v+1]-off[v];
}
}
// cout << "Largest vertex is: " << maxV << " With the length of :" << maxLen << endl;
bfsTD bfs;
bfs.Init(custing);
bfs.Reset();
bfs.setInputParameters(maxV);
start_clock(ce_start, ce_stop);
bfs.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of levels : " << bfs.getLevels() << endl;
cout << "The number of elements found : " << bfs.getElementsFound() << endl;
cout << "Total time for BFS - Top-Down : " << totalTime << endl;
bfs.Release();
// bfsBU bfsbu;
// bfsbu.Init(custing);
// bfsbu.Reset();
// bfsbu.setInputParameters(maxV);
// start_clock(ce_start, ce_stop);
// bfsbu.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of levels : " << bfsbu.getLevels() << endl;
// cout << "The number of elements found : " << bfsbu.getElementsFound(custing) << endl;
// cout << "Total time for BFS - Bottom-up: " << totalTime << endl;
// bfsbu.Release();
// bfsHybrid bfsHy;
// bfsHy.Init(custing);
// bfsHy.Reset();
// bfsHy.setInputParameters(maxV);
// start_clock(ce_start, ce_stop);
// bfsHy.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of levels : " << bfsHy.getLevels() << endl;
// cout << "The number of elements found : " << bfsHy.getElementsFound(custing) << endl;
// cout << "Total time for BFS - Hybrid : " << totalTime << endl;
// bfsHy.Release();
StaticPageRank pr;
pr.Init(custing);
pr.Reset();
pr.setInputParameters(5,0.001);
start_clock(ce_start, ce_stop);
pr.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of iterations : " << pr.getIterationCount() << endl;
cout << "Total time for pagerank : " << totalTime << endl;
cout << "Average time per iteartion : " << totalTime/(float)pr.getIterationCount() << endl;
// pr.printRankings(custing);
pr.Release();
StaticPageRank pr2;// =new StaticPageRank();
pr2.Init(custing);
pr2.Reset();
pr2.setInputParameters(5,0.001);
start_clock(ce_start, ce_stop);
pr2.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << pr2.getIterationCount() << endl;
// cout << "Total time for pagerank : " << totalTime << endl;
// cout << "Average time per iteartion : " << totalTime/(float)pr2.getIterationCount() << endl;
// pr2.printRankings(custing);
pr2.Release();
float *bc = new float[nv];
for (int k = 0; k < nv; k++)
{
bc[k] = 0;
}
StaticBC sbc(bc);
sbc.Init(custing);
sbc.Reset();
start_clock(ce_start, ce_stop);
sbc.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "Total time for Static Betweenness Centrality: " << totalTime << endl;
sbc.Reset();
sbc.Release();
delete[] bc;
custing.freecuStinger();
free(off);
free(adj);
return 0;
}
| c49bf441a771fa59908bee5a82e2921c8e47c1d7.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <inttypes.h>
#include <math.h>
#include "utils.hpp"
#include "update.hpp"
#include "cuStinger.hpp"
#include "algs.cuh"
#include "static_breadth_first_search/bfs_top_down.cuh"
// #include "static_breadth_first_search/bfs_bottom_up.cuh"
// #include "static_breadth_first_search/bfs_hybrid.cuh"
#include "static_connected_components/cc.cuh"
#include "static_page_rank/pr.cuh"
#include "static_betwenness_centrality/bc.cuh"
using namespace cuStingerAlgs;
#define CUDA(call, ...) do { \
cudaError_t _e = (call); \
if (_e == cudaSuccess) break; \
fprintf(stdout, \
"CUDA runtime error: %s (%d)\n", \
cudaGetErrorString(_e), _e); \
return -1; \
} while (0)
int main(const int argc, char *argv[]){
int device=0;
cudaSetDevice(device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, device);
length_t nv, ne,*off;
vertexId_t *adj;
bool isDimacs,isSNAP,isRmat=false,isMarket;
string filename(argv[1]);
isDimacs = filename.find(".graph")==std::string::npos?false:true;
isSNAP = filename.find(".txt")==std::string::npos?false:true;
isRmat = filename.find("kron")==std::string::npos?false:true;
isMarket = filename.find(".mtx")==std::string::npos?false:true;
if(isDimacs){
readGraphDIMACS(argv[1],&off,&adj,&nv,&ne,isRmat);
}
else if(isSNAP){
readGraphSNAP(argv[1],&off,&adj,&nv,&ne,isRmat);
}
else if(isMarket){
readGraphMatrixMarket(argv[1],&off,&adj,&nv,&ne,(isRmat)?false:true);
}
else{
cout << "Unknown graph type" << endl;
}
cout << "Vertices: " << nv << " Edges: " << ne << endl;
cudaEvent_t ce_start,ce_stop;
cuStinger custing(defaultInitAllocater,defaultUpdateAllocater);
cuStingerInitConfig cuInit;
cuInit.initState =eInitStateCSR;
cuInit.maxNV = nv+1;
cuInit.useVWeight = false;
cuInit.isSemantic = false; // Use edge types and vertex types
cuInit.useEWeight = false;
// CSR data
cuInit.csrNV = nv;
cuInit.csrNE = ne;
cuInit.csrOff = off;
cuInit.csrAdj = adj;
cuInit.csrVW = NULL;
cuInit.csrEW = NULL;
custing.initializeCuStinger(cuInit);
float totalTime;
ccBaseline scc;
scc.Init(custing);
scc.Reset();
start_clock(ce_start, ce_stop);
// scc.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
scc.Release();
ccConcurrent scc2;
scc2.Init(custing);
scc2.Reset();
start_clock(ce_start, ce_stop);
// scc2.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc2.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc2.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
scc2.Release();
ccConcurrentLB scc3;
scc3.Init(custing);
scc3.Reset();
start_clock(ce_start, ce_stop);
scc3.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of iterations : " << scc3.GetIterationCount() << endl;
cout << "The number of connected-compoents : " << scc3.CountConnectComponents(custing) << endl;
cout << "Total time for connected-compoents : " << totalTime << endl;
scc3.Release();
// ccConcurrentOptimized scc4;
// scc4.Init(custing);
// scc4.Reset();
// start_clock(ce_start, ce_stop);
// scc4.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << scc4.GetIterationCount() << endl;
// cout << "The number of connected-compoents : " << scc4.CountConnectComponents(custing) << endl;
// cout << "Total time for connected-compoents : " << totalTime << endl;
// scc4.Release();
// Finding largest vertex
vertexId_t maxV=0;
length_t maxLen=0;
for(int v=1; v<nv;v++){
if((off[v+1]-off[v])>maxLen){
maxV=v;
maxLen=off[v+1]-off[v];
}
}
// cout << "Largest vertex is: " << maxV << " With the length of :" << maxLen << endl;
bfsTD bfs;
bfs.Init(custing);
bfs.Reset();
bfs.setInputParameters(maxV);
start_clock(ce_start, ce_stop);
bfs.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of levels : " << bfs.getLevels() << endl;
cout << "The number of elements found : " << bfs.getElementsFound() << endl;
cout << "Total time for BFS - Top-Down : " << totalTime << endl;
bfs.Release();
// bfsBU bfsbu;
// bfsbu.Init(custing);
// bfsbu.Reset();
// bfsbu.setInputParameters(maxV);
// start_clock(ce_start, ce_stop);
// bfsbu.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of levels : " << bfsbu.getLevels() << endl;
// cout << "The number of elements found : " << bfsbu.getElementsFound(custing) << endl;
// cout << "Total time for BFS - Bottom-up: " << totalTime << endl;
// bfsbu.Release();
// bfsHybrid bfsHy;
// bfsHy.Init(custing);
// bfsHy.Reset();
// bfsHy.setInputParameters(maxV);
// start_clock(ce_start, ce_stop);
// bfsHy.Run(custing);
// totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of levels : " << bfsHy.getLevels() << endl;
// cout << "The number of elements found : " << bfsHy.getElementsFound(custing) << endl;
// cout << "Total time for BFS - Hybrid : " << totalTime << endl;
// bfsHy.Release();
StaticPageRank pr;
pr.Init(custing);
pr.Reset();
pr.setInputParameters(5,0.001);
start_clock(ce_start, ce_stop);
pr.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "The number of iterations : " << pr.getIterationCount() << endl;
cout << "Total time for pagerank : " << totalTime << endl;
cout << "Average time per iteartion : " << totalTime/(float)pr.getIterationCount() << endl;
// pr.printRankings(custing);
pr.Release();
StaticPageRank pr2;// =new StaticPageRank();
pr2.Init(custing);
pr2.Reset();
pr2.setInputParameters(5,0.001);
start_clock(ce_start, ce_stop);
pr2.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
// cout << "The number of iterations : " << pr2.getIterationCount() << endl;
// cout << "Total time for pagerank : " << totalTime << endl;
// cout << "Average time per iteartion : " << totalTime/(float)pr2.getIterationCount() << endl;
// pr2.printRankings(custing);
pr2.Release();
float *bc = new float[nv];
for (int k = 0; k < nv; k++)
{
bc[k] = 0;
}
StaticBC sbc(bc);
sbc.Init(custing);
sbc.Reset();
start_clock(ce_start, ce_stop);
sbc.Run(custing);
totalTime = end_clock(ce_start, ce_stop);
cout << "Total time for Static Betweenness Centrality: " << totalTime << endl;
sbc.Reset();
sbc.Release();
delete[] bc;
custing.freecuStinger();
free(off);
free(adj);
return 0;
}
|
a6112a53c93adb006f96d5198e801a80764922d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/amp/update_loss_scaling_op.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void GpuUpdateLossScaling(
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps, const int decr_every_n_nan_or_inf,
const float incr_ratio, const float decr_ratio,
T* updated_loss_scaling_data, int* good_out_data, int* bad_out_data) {
Update<T>(found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
template <typename T>
__global__ void FillIf(T* data, const int64_t num, const T value,
const bool* has_inf) {
if (*has_inf) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
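// Grid-stride loop: each thread writes 'value' to every (blockDim.x * gridDim.x)-th element,
// so any grid size covers all num elements.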
for (int i = tid; i < num; i += blockDim.x * gridDim.x) {
data[i] = value;
}
}
}
template <typename T>
class UpdateLossScalingFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps,
const int decr_every_n_nan_or_inf, const float incr_ratio,
const float decr_ratio, T* updated_loss_scaling_data,
int* good_out_data, int* bad_out_data) const {
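// The loss-scaling state consists of a handful of scalars, so a single-thread launch is sufficient here.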
hipLaunchKernelGGL(( GpuUpdateLossScaling<T>), dim3(1), dim3(1), 0, dev_ctx.stream(),
found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
};
template <typename T>
class LazyZeros<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data,
const std::vector<const framework::Tensor*>& xs,
const std::vector<framework::Tensor*>& outs) const {
for (size_t i = 0; i < xs.size(); ++i) {
auto* out = outs[i];
T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());
int64_t num = out->numel();
int block = 1024;
int grid = (block - 1 + num) / block;
hipLaunchKernelGGL(( FillIf), dim3(grid), dim3(block), 0, dev_ctx.stream(),
out_data, num, static_cast<T>(0), found_inf_data);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
using GPU = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(update_loss_scaling,
ops::UpdateLossScalingKernel<GPU, float>,
ops::UpdateLossScalingKernel<GPU, double>,
ops::UpdateLossScalingKernel<GPU, plat::float16>);
| a6112a53c93adb006f96d5198e801a80764922d9.cu | /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/amp/update_loss_scaling_op.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void GpuUpdateLossScaling(
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps, const int decr_every_n_nan_or_inf,
const float incr_ratio, const float decr_ratio,
T* updated_loss_scaling_data, int* good_out_data, int* bad_out_data) {
Update<T>(found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
template <typename T>
__global__ void FillIf(T* data, const int64_t num, const T value,
const bool* has_inf) {
if (*has_inf) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
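// Grid-stride loop: each thread writes 'value' to every (blockDim.x * gridDim.x)-th element,
// so any grid size covers all num elements.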
for (int i = tid; i < num; i += blockDim.x * gridDim.x) {
data[i] = value;
}
}
}
template <typename T>
class UpdateLossScalingFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data, const T* pre_loss_scaling_data,
const int* good_in_data, const int* bad_in_data,
const int incr_every_n_steps,
const int decr_every_n_nan_or_inf, const float incr_ratio,
const float decr_ratio, T* updated_loss_scaling_data,
int* good_out_data, int* bad_out_data) const {
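// The loss-scaling state consists of a handful of scalars, so a single-thread launch is sufficient here.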
GpuUpdateLossScaling<T><<<1, 1, 0, dev_ctx.stream()>>>(
found_inf_data, pre_loss_scaling_data, good_in_data, bad_in_data,
incr_every_n_steps, decr_every_n_nan_or_inf, incr_ratio, decr_ratio,
updated_loss_scaling_data, good_out_data, bad_out_data);
}
};
template <typename T>
class LazyZeros<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& dev_ctx,
const bool* found_inf_data,
const std::vector<const framework::Tensor*>& xs,
const std::vector<framework::Tensor*>& outs) const {
for (size_t i = 0; i < xs.size(); ++i) {
auto* out = outs[i];
T* out_data = out->mutable_data<T>(dev_ctx.GetPlace());
int64_t num = out->numel();
int block = 1024;
int grid = (block - 1 + num) / block;
FillIf<<<grid, block, 0, dev_ctx.stream()>>>(
out_data, num, static_cast<T>(0), found_inf_data);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
using GPU = paddle::platform::CUDADeviceContext;
REGISTER_OP_CUDA_KERNEL(update_loss_scaling,
ops::UpdateLossScalingKernel<GPU, float>,
ops::UpdateLossScalingKernel<GPU, double>,
ops::UpdateLossScalingKernel<GPU, plat::float16>);
|
3f09be6e6cb26ebf366aa08d8631ac30505834ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Device.h"
#include "Graph.h"
#include "Expression.h"
#include "Expression_p.h"
#include "Node.h"
#include "Utils.h"
#include <type_traits>
#include <cudnn.h>
#include <hipcub/hipcub.hpp>
#include <hiprand/hiprand.h>
#include <cusparse_v2.h>
static const int kThreadsPerBlock = 128;
static const int kMaxThreadsPerBlock = 512;
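// Maps a dense logical index to a (possibly strided or broadcast) storage index by peeling off
// one coordinate per dimension: elems[i] is the number of logical elements covered by one step
// along dimension i, and strides[i] is the corresponding storage stride. For example, for a
// contiguous 2x3 tensor (elems = {3, 1}, strides = {3, 1}), logical index 4 decodes to
// coordinates (1, 1) and storage index 1*3 + 1*1 = 4; a broadcast dimension simply uses stride 0.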
static inline __device__ int GetTensorStorageIndex(int logical_index, int ndims, const int *elems, const int *strides) {
int ret = 0;
for (int i = 0; i < ndims; i++) {
int cur = logical_index / elems[i];
ret += strides[i] * cur;
logical_index %= elems[i];
}
return ret;
}
struct ReduceDesc {
int regular_sizes[kMaxTensorDim + 1], reduce_sizes[kMaxTensorDim + 1];
int strides[kMaxTensorDim + 1];
};
// Reduction modes:
// 0. No reduction : each thread handles one output element from one input element.
// 1. Small reduction : reduction size is less than kSmallReducesInBlock, each thread does one reduction. (TODO)
// 2. Medium reduction : reduction size is less than kMaxThreadsPerBlock, each warp does one reduction. (TODO)
// 3. Large reduction : reduction size is less than kMaxThreadsPerBlock * kMaxReducePerThread,
//    each thread block does one reduction.
// 4. Huge reduction : reduction size is larger than kMaxThreadsPerBlock * kMaxReducePerThread,
//    the reduction is distributed over several blocks. (TODO: kMaxReducePerThread is currently set to 2147483647.)
//static const int kSmallReducesInBlock = 32;
//static const int kMaxSmallReductionSize = kMaxThreadsPerBlock / kSmallReducesInBlock;
static const int kMaxReducePerThread = 2147483647;
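// Example: reducing a tensor with regular_total = 4096 and reduce_total = 600 takes the large
// reduction path below: one block of kMaxThreadsPerBlock (512) threads per output element, with
// reduces_per_thread = ceil(600 / 512) = 2 serial loads per thread before the block-wide reduction.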
template<typename TransformFunc, typename StoreFunc, typename ExtraData>
static __global__ void TransformReduceKernel(TransformFunc transform_func, StoreFunc store_func,
int dims, int regular_total, ExtraData extra_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < regular_total) {
float value = transform_func(index, extra_data);
store_func(index, value, extra_data);
}
}
template<int ndims, typename TransformFunc, typename ReduceFunc, typename StoreFunc, typename ExtraData>
static __global__ void TransformReduceKernel(TransformFunc transform_func, ReduceFunc reduce_func, StoreFunc store_func,
int regular_total, int reduce_total, ReduceDesc reduce_desc, int reduces_per_thread, ExtraData extra_data) {
typedef hipcub::BlockReduce<float, kMaxThreadsPerBlock> BlockReduceT;
__shared__ typename BlockReduceT::TempStorage temp_storage;
int regular_idx = blockIdx.x;
int reduce_idx_base = threadIdx.x;
int base_idx = GetTensorStorageIndex(regular_idx, ndims, reduce_desc.regular_sizes, reduce_desc.strides);
// First element
int index = base_idx + GetTensorStorageIndex(reduce_idx_base, ndims, reduce_desc.reduce_sizes, reduce_desc.strides);
float value = transform_func(index, extra_data);
int reduce_idx = reduce_idx_base;
for (int i = 1; i < reduces_per_thread; i++) {
reduce_idx += blockDim.x;
if (reduce_idx < reduce_total) {
int index = base_idx + GetTensorStorageIndex(reduce_idx, ndims, reduce_desc.reduce_sizes, reduce_desc.strides);
float cur_value = transform_func(index, extra_data);
// Reduce element
value = reduce_func(value, cur_value);
}
}
float result = BlockReduceT(temp_storage).Reduce(value, reduce_func, reduce_total);
if (threadIdx.x == 0)
store_func(base_idx, result, extra_data);
}
static void GetReduceDims(int dims, const int *from_dims, const int *to_dims,
int *regular_total, int *reduce_total,
int regular_sizes[kMaxTensorDim + 1], int reduce_sizes[kMaxTensorDim + 1], int strides[kMaxTensorDim + 1]) {
int regular_tot = 1, reduce_tot = 1;
int tot = 1;
for (int i = dims - 1; i >= 0; i--) {
int from_dim = from_dims[i], to_dim = to_dims[i];
strides[i] = tot;
regular_sizes[i] = regular_tot;
reduce_sizes[i] = reduce_tot;
tot *= from_dim;
if (from_dim == to_dim) {
// Regular dimension
regular_tot *= from_dim;
}
else if (to_dim == 1) {
// Reduce dimension
reduce_tot *= from_dim;
}
else // Invalid reduction operation
DEBUG_BREAK();
}
*regular_total = regular_tot;
*reduce_total = reduce_tot;
}
template<typename TransformFunc, typename ReduceFunc, typename StoreFunc, typename ExtraData>
static void TransformReduce(TransformFunc transform_func, ReduceFunc reduce_func, StoreFunc store_func,
int dims, int regular_total, int regular_sizes[kMaxTensorDim + 1],
int reduce_total, int reduce_sizes[kMaxTensorDim + 1], int strides[kMaxTensorDim + 1],
const ExtraData &extra_data) {
ReduceDesc desc;
memcpy(&desc.regular_sizes, regular_sizes, sizeof(desc.regular_sizes));
memcpy(&desc.reduce_sizes, reduce_sizes, sizeof(desc.reduce_sizes));
memcpy(&desc.strides, strides, sizeof(desc.strides));
if (reduce_total == 1) {
// 0. No reduction
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (regular_total + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( TransformReduceKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, transform_func, store_func,
dims, regular_total, extra_data);
}
else {
// 3. Large reduction
int reduces_per_thread = (reduce_total + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
if (reduces_per_thread > kMaxReducePerThread)
DEBUG_BREAK(); // TODO
int blocksPerGrid = regular_total;
int threadsPerBlock;
if (reduce_total < kMaxThreadsPerBlock)
threadsPerBlock = reduce_total;
else
threadsPerBlock = kMaxThreadsPerBlock;
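// ndims is a compile-time template parameter so the per-dimension loop in
// GetTensorStorageIndex can be unrolled; dispatch over every supported rank.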
switch (dims) {
case 1:hipLaunchKernelGGL(( TransformReduceKernel<1>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 2:hipLaunchKernelGGL(( TransformReduceKernel<2>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 3:hipLaunchKernelGGL(( TransformReduceKernel<3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 4:hipLaunchKernelGGL(( TransformReduceKernel<4>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 5:hipLaunchKernelGGL(( TransformReduceKernel<5>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 6:hipLaunchKernelGGL(( TransformReduceKernel<6>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 7:hipLaunchKernelGGL(( TransformReduceKernel<7>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 8:hipLaunchKernelGGL(( TransformReduceKernel<8>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
default:
static_assert(8 == kMaxTensorDim + 1, "");
DEBUG_BREAK();
}
}
}
static __global__ void LookupForwardKernel(int total, int emb_size, const int *indices,
const float *x, float *y) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < total) {
int j = i / emb_size;
int k = i % emb_size;
y[i] = x[indices[j] * emb_size + k];
}
}
static __global__ void LookupBackwardKernel(int total, int emb_size, const int *indices,
const float *dEdY, float *dEdX) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < total) {
int j = i / emb_size;
int k = i % emb_size;
// TODO: Use a proper reduction mechanism, and try to make the reduction deterministic.
atomicAdd(&dEdX[indices[j] * emb_size + k], dEdY[i]);
}
}
class LookupNodeGPU : public Node {
public:
LookupNodeGPU(Graph *graph, int embeddings, int batch_size, const Shape &shape, const int *indices)
: Node{ embeddings }, batch_size_(batch_size), shape_(shape) {
int size = batch_size * shape.GetSize() * sizeof(int);
indices_pinned_ = (int*)graph->GetDevice()->AllocateMemoryPinned(size);
memcpy(indices_pinned_, indices, size);
indices_ = (int *)graph->GetDevice()->AllocateMemory(size);
CUDA_CALL(hipMemcpyAsync(indices_, indices_pinned_, size, hipMemcpyHostToDevice));
}
virtual void FreeMemory(Device *device) {
device->FreeMemoryPinned(indices_pinned_);
device->FreeMemory(indices_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int total = y->GetBatchSize() * y->GetShape().GetSize();
int emb_size = y->GetShape().GetDim(1);
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (total + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( LookupForwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, total, emb_size, indices_,
x_data, y_data);
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
int total = y->GetBatchSize() * y->GetShape().GetSize();
int emb_size = y->GetShape().GetDim(1);
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (total + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( LookupBackwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, total, emb_size, indices_,
dEdY_data, dEdX_data);
}
private:
int batch_size_;
Shape shape_;
int *indices_pinned_, *indices_;
};
template<typename Dummy>
struct LookupNodeFactory<Dummy, GPU> {
Node *Create(Graph *graph, int embeddings, int batch_size, const Shape &shape, const int *indices) {
return new LookupNodeGPU(graph, embeddings, batch_size, shape, indices);
}
};
template struct LookupNodeFactory<void, GPU>;
struct BinaryForwardDims {
int elems[kMaxTensorDim + 1];
int lhs_strides[kMaxTensorDim + 1], rhs_strides[kMaxTensorDim + 1];
};
template<typename ForwardFunc, int ndims>
static __global__ void BinaryForwardKernel(const float *lhs, const float *rhs, float *y,
int nelems, BinaryForwardDims forward) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
int lhs_index = GetTensorStorageIndex(i, ndims, forward.elems, forward.lhs_strides);
int rhs_index = GetTensorStorageIndex(i, ndims, forward.elems, forward.rhs_strides);
y[i] = ForwardFunc()(lhs[lhs_index], rhs[rhs_index]);
}
}
template<typename ForwardFunc>
static void BinaryForwardKernelWrapper(const float *lhs, const float *rhs, float *y,
int nelems, int ndims, BinaryForwardDims forward) {
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (nelems + threadsPerBlock - 1) / threadsPerBlock;
switch (ndims) {
case 1:hipLaunchKernelGGL(( BinaryForwardKernel<ForwardFunc, 1>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs, rhs, y, nelems, forward); break;
case 2:hipLaunchKernelGGL(( BinaryForwardKernel<ForwardFunc, 2>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs, rhs, y, nelems, forward); break;
case 3:hipLaunchKernelGGL(( BinaryForwardKernel<ForwardFunc, 3>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs, rhs, y, nelems, forward); break;
case 4:hipLaunchKernelGGL(( BinaryForwardKernel<ForwardFunc, 4>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs, rhs, y, nelems, forward); break;
case 5:hipLaunchKernelGGL(( BinaryForwardKernel<ForwardFunc, 5>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs, rhs, y, nelems, forward); break;
case 6:hipLaunchKernelGGL(( BinaryForwardKernel<ForwardFunc, 6>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs, rhs, y, nelems, forward); break;
case 7:hipLaunchKernelGGL(( BinaryForwardKernel<ForwardFunc, 7>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs, rhs, y, nelems, forward); break;
case 8:hipLaunchKernelGGL(( BinaryForwardKernel<ForwardFunc, 8>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs, rhs, y, nelems, forward); break;
default:
static_assert(8 == kMaxTensorDim + 1, "");
DEBUG_BREAK();
}
CUDA_CALL(hipGetLastError());
}
struct BinaryReduceDesc {
int lhs_strides[kMaxTensorDim + 1], rhs_strides[kMaxTensorDim + 1];
int strides[kMaxTensorDim + 1];
};
template<typename ForwardFunc, typename BackwardFunc>
class BinaryOpNodeGPU : public Node {
public:
BinaryOpNodeGPU(int lhs_node, int rhs_node) : Node{ lhs_node, rhs_node } {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *lhs_data = x[0]->GetData(), *rhs_data = x[1]->GetData();
int size = y->GetShape().GetSize();
float *y_data = y->GetData();
int y_batch_size = y->GetBatchSize();
const Shape &y_shape = y->GetShape();
int nelems = y_batch_size * y_shape.GetSize();
int ndims = 1 + y_shape.GetRank();
BinaryForwardDims forward;
forward.elems[ndims - 1] = 1;
for (int i = ndims - 2; i >= 0; i--)
forward.elems[i] = forward.elems[i + 1] * y_shape.GetDim(i);
GetTensorStrides(x[0], forward.lhs_strides);
GetTensorStrides(x[1], forward.rhs_strides);
BinaryForwardKernelWrapper<ForwardFunc>(
lhs_data, rhs_data, y_data, nelems, ndims, forward);
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
if (std::is_same<BackwardFunc, BinaryNoBackward>::value)
REPORT_ERROR("Backward propagation is unsupported for this expression.");
const float *lhs_data = x[0]->GetData(), *rhs_data = x[1]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdL_data = dEdX[0]->GetData(), *dEdR_data = dEdX[1]->GetData();
const Shape &lhs_shape = x[0]->GetShape(), &rhs_shape = x[1]->GetShape();
const Shape &y_shape = y->GetShape();
int ndims = 1 + y_shape.GetRank();
int lhs_dims[kMaxTensorDim + 1], rhs_dims[kMaxTensorDim + 1];
int y_dims[kMaxTensorDim + 1];
GetTensorDims(x[0], lhs_dims);
GetTensorDims(x[1], rhs_dims);
GetTensorDims(y, y_dims);
int lhs_strides[kMaxTensorDim + 1], rhs_strides[kMaxTensorDim + 1];
GetTensorStrides(x[0], lhs_strides);
GetTensorStrides(x[1], rhs_strides);
int regular_total, reduce_total;
int regular_sizes[kMaxTensorDim + 1], reduce_sizes[kMaxTensorDim + 1];
int strides[kMaxTensorDim + 1];
BinaryReduceDesc desc;
/* LHS */
{
GetReduceDims(ndims, y_dims, lhs_dims,
®ular_total, &reduce_total, regular_sizes, reduce_sizes, strides);
memcpy(&desc.lhs_strides, lhs_strides, sizeof(desc.lhs_strides));
memcpy(&desc.rhs_strides, rhs_strides, sizeof(desc.rhs_strides));
memcpy(&desc.strides, strides, sizeof(desc.strides));
auto transform_func = [=] __device__(int index, const BinaryReduceDesc &desc) {
int lhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.lhs_strides);
int rhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.rhs_strides);
float dYdL_value, dYdR_value;
BackwardFunc()(lhs_data[lhs_index], rhs_data[rhs_index], y_data[index], &dYdL_value, &dYdR_value);
return dEdY_data[index] * dYdL_value;
};
auto store_func = [=] __device__(int index, float result, const BinaryReduceDesc &desc) {
int lhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.lhs_strides);
dEdL_data[lhs_index] += result;
};
TransformReduce(transform_func, hipcub::Sum(), store_func,
ndims, regular_total, regular_sizes, reduce_total, reduce_sizes, strides, desc);
}
/* RHS */
{
GetReduceDims(ndims, y_dims, rhs_dims,
®ular_total, &reduce_total, regular_sizes, reduce_sizes, strides);
auto transform_func = [=] __device__(int index, const BinaryReduceDesc &desc) {
int lhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.lhs_strides);
int rhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.rhs_strides);
float dYdL_value, dYdR_value;
BackwardFunc()(lhs_data[lhs_index], rhs_data[rhs_index], y_data[index], &dYdL_value, &dYdR_value);
return dEdY_data[index] * dYdR_value;
};
auto store_func = [=] __device__(int index, float result, const BinaryReduceDesc &desc) {
int rhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.rhs_strides);
dEdR_data[rhs_index] += result;
};
TransformReduce(transform_func, hipcub::Sum(), store_func,
ndims, regular_total, regular_sizes, reduce_total, reduce_sizes, strides, desc);
}
}
};
template<typename ForwardFunc, typename BackwardFunc>
struct BinaryOpNodeFactory<GPU, ForwardFunc, BackwardFunc> {
Node *Create(int lhs_node, int rhs_node) {
return new BinaryOpNodeGPU<ForwardFunc, BackwardFunc>(lhs_node, rhs_node);
}
};
template<typename ForwardFunc>
static __global__ void BinaryLeftScalarForwardKernel(float lhs, const float *rhs, float *y, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
y[i] = ForwardFunc()(lhs, rhs[i]);
}
template<typename BackwardFunc>
static __global__ void BinaryLeftScalarBackwardKernel(float lhs, const float *rhs, const float *y,
const float *dEdY, float *dEdR, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
float dYdL, dYdR;
BackwardFunc()(lhs, rhs[i], y[i], &dYdL, &dYdR);
dEdR[i] = dEdY[i] * dYdR;
}
}
template<typename ForwardFunc, typename BackwardFunc>
class BinaryLeftScalarOpNodeGPU : public Node {
public:
BinaryLeftScalarOpNodeGPU(float lhs_scalar, int rhs_node) : Node{ rhs_node }, lhs_scalar_(lhs_scalar) {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *rhs_data = x[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
hipLaunchKernelGGL(( BinaryLeftScalarForwardKernel<ForwardFunc>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs_scalar_, rhs_data, y_data, size);
CUDA_CALL(hipGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
if (std::is_same<BackwardFunc, BinaryNoBackward>::value)
REPORT_ERROR("Backward propagation is unsupported for this expression.");
const float *rhs_data = x[0]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdR_data = dEdX[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
hipLaunchKernelGGL(( BinaryLeftScalarBackwardKernel<BackwardFunc>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs_scalar_, rhs_data, y_data, dEdY_data, dEdR_data, size);
CUDA_CALL(hipGetLastError());
}
private:
float lhs_scalar_;
};
template<typename ForwardFunc, typename BackwardFunc>
struct BinaryLeftScalarOpNodeFactory<GPU, ForwardFunc, BackwardFunc> {
Node *Create(float lhs_scalar, int rhs_node) {
return new BinaryLeftScalarOpNodeGPU<ForwardFunc, BackwardFunc>(lhs_scalar, rhs_node);
}
};
template<typename ForwardFunc>
static __global__ void BinaryRightScalarForwardKernel(const float *lhs, float rhs, float *y, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
y[i] = ForwardFunc()(lhs[i], rhs);
}
template<typename BackwardFunc>
static __global__ void BinaryRightScalarBackwardKernel(const float *lhs, float rhs, const float *y,
const float *dEdY, float *dEdL, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
float dYdL, dYdR;
BackwardFunc()(lhs[i], rhs, y[i], &dYdL, &dYdR);
dEdL[i] = dEdY[i] * dYdL;
}
}
template<typename ForwardFunc, typename BackwardFunc>
class BinaryRightScalarOpNodeGPU : public Node {
public:
BinaryRightScalarOpNodeGPU(int lhs_node, float rhs_scalar) : Node{ lhs_node }, rhs_scalar_(rhs_scalar) {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *lhs_data = x[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
hipLaunchKernelGGL(( BinaryRightScalarForwardKernel<ForwardFunc>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs_data, rhs_scalar_, y_data, size);
CUDA_CALL(hipGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
if (std::is_same<BackwardFunc, BinaryNoBackward>::value)
REPORT_ERROR("Backward propagation is unsupported for this expression.");
const float *lhs_data = x[0]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdL_data = dEdX[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
hipLaunchKernelGGL(( BinaryRightScalarBackwardKernel<BackwardFunc>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
lhs_data, rhs_scalar_, y_data, dEdY_data, dEdL_data, size);
CUDA_CALL(hipGetLastError());
}
private:
float rhs_scalar_;
};
template<typename ForwardFunc, typename BackwardFunc>
struct BinaryRightScalarOpNodeFactory<GPU, ForwardFunc, BackwardFunc> {
Node *Create(int lhs_node, float rhs_scalar) {
return new BinaryRightScalarOpNodeGPU<ForwardFunc, BackwardFunc>(lhs_node, rhs_scalar);
}
};
template<typename ForwardFunc>
static __global__ void UnaryForwardKernel(const float *x, float *y, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
y[i] = ForwardFunc()(x[i]);
}
template<typename BackwardFunc>
static __global__ void UnaryBackwardKernel(const float *x, const float *y,
const float *dEdY, float *dEdX, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
float dYdX;
BackwardFunc()(x[i], y[i], &dYdX);
dEdX[i] = dEdY[i] * dYdX;
}
}
template<typename ForwardFunc, typename BackwardFunc>
class UnaryOpNodeGPU : public Node {
public:
UnaryOpNodeGPU(int node) : Node{ node } {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *x_data = x[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
hipLaunchKernelGGL(( UnaryForwardKernel<ForwardFunc>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x_data, y_data, size);
CUDA_CALL(hipGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *x_data = x[0]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
hipLaunchKernelGGL(( UnaryBackwardKernel<BackwardFunc>), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
x_data, y_data, dEdY_data, dEdX_data, size);
CUDA_CALL(hipGetLastError());
}
};
template<typename ForwardFunc, typename BackwardFunc>
struct UnaryOpNodeFactory<GPU, ForwardFunc, BackwardFunc> {
Node *Create(int node) {
return new UnaryOpNodeGPU<ForwardFunc, BackwardFunc>(node);
}
};
INSTANTIATE_BINARY_OPS(GPU)
INSTANTIATE_BINARY_LEFT_SCALAR_OPS(GPU)
INSTANTIATE_BINARY_RIGHT_SCALAR_OPS(GPU)
INSTANTIATE_UNARY_OPS(GPU)
class SparseDotNodeGPU : public Node {
public:
SparseDotNodeGPU(int lhs, int rhs) : Node{ lhs, rhs } {
CUSPARSE_CALL(hipsparseCreateMatDescr(&mat_desc_));
CUSPARSE_CALL(hipsparseSetMatType(mat_desc_, HIPSPARSE_MATRIX_TYPE_GENERAL));
CUSPARSE_CALL(hipsparseSetMatIndexBase(mat_desc_, HIPSPARSE_INDEX_BASE_ZERO));
}
virtual ~SparseDotNodeGPU() {
CUSPARSE_CALL(hipsparseDestroyMatDescr(mat_desc_));
}
virtual int GetFlags() const override {
return NoAllocateBackwardOutput;
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Tensor *lhs = x[0], *rhs = x[1];
float alpha = 1.f, beta = 0.f;
CUSPARSE_CALL(hipsparseScsrmv(graph->GetDevice()->GetCuSPARSEHandle(), HIPSPARSE_OPERATION_NON_TRANSPOSE,
lhs->GetBatchSize(), lhs->GetShape().GetDim(0), lhs->GetNonZeroCount(),
&alpha, mat_desc_, lhs->GetSparseData(), lhs->GetSparseRowIndices(), lhs->GetSparseColumnIndices(),
rhs->GetData(), &beta, y->GetData()));
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const Tensor *lhs = x[0], *rhs = x[1];
Tensor *dEdL = dEdX[0], *dEdR = dEdX[1];
AllocateClearTensor(graph, dEdR);
// dEdL += dEdY * R'
// dEdR += L' * dEdY
float alpha = 1.f, beta = 1.f;
// dEdL not implemented for now.
CUSPARSE_CALL(hipsparseScsrmv(graph->GetDevice()->GetCuSPARSEHandle(), HIPSPARSE_OPERATION_TRANSPOSE,
lhs->GetBatchSize(), lhs->GetShape().GetDim(0), lhs->GetNonZeroCount(),
&alpha, mat_desc_, lhs->GetSparseData(), lhs->GetSparseRowIndices(), lhs->GetSparseColumnIndices(),
dEdY->GetData(), &beta, dEdR->GetData()));
}
private:
hipsparseMatDescr_t mat_desc_;
};
template<typename Dummy>
struct SparseDotNodeFactory<Dummy, GPU> {
Node *Create(int lhs_node, int rhs_node) {
return new SparseDotNodeGPU(lhs_node, rhs_node);
}
};
template struct SparseDotNodeFactory<void, GPU>;
class PoolingNodeGPU : public Node {
public:
PoolingNodeGPU(int node, const Shape &filter_shape, const Shape &strides, const Shape &padding, PoolingMode mode)
: Node{ node }, filter_shape_(filter_shape), strides_(strides), padding_(padding), mode_(mode) {
CUDNN_CALL(cudnnCreatePoolingDescriptor(&pooling_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
}
virtual ~PoolingNodeGPU() {
CUDNN_CALL(cudnnDestroyPoolingDescriptor(pooling_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Shape &x_shape = x[0]->GetShape();
const Shape &y_shape = y->GetShape();
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int ndims = y->GetShape().GetRank() - 1;
int x_dims[CUDNN_DIM_MAX], y_dims[CUDNN_DIM_MAX];
x_dims[0] = x[0]->GetBatchSize();
y_dims[0] = y->GetBatchSize();
for (int i = 0; i < ndims + 1; i++) {
x_dims[i + 1] = x_shape.GetDim(i);
y_dims[i + 1] = y_shape.GetDim(i);
}
int x_strides[CUDNN_DIM_MAX], y_strides[CUDNN_DIM_MAX];
x_strides[ndims + 1] = 1;
y_strides[ndims + 1] = 1;
for (int i = ndims; i >= 0; i--) {
x_strides[i] = x_dims[i + 1] * x_strides[i + 1];
y_strides[i] = y_dims[i + 1] * y_strides[i + 1];
}
cudnnPoolingMode_t pooling_mode;
if (mode_ == PoolingMode::MaxPooling)
pooling_mode = CUDNN_POOLING_MAX;
else if (mode_ == PoolingMode::AvgPooling)
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
else if (mode_ == PoolingMode::AvgPoolingWithPadding)
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
else
DEBUG_BREAK();
CUDNN_CALL(cudnnSetPoolingNdDescriptor(pooling_desc_, pooling_mode, CUDNN_PROPAGATE_NAN,
ndims, filter_shape_.data(), padding_.data(), strides_.data()));
CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, CUDNN_DATA_FLOAT, ndims + 2, x_dims, x_strides));
CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, CUDNN_DATA_FLOAT, ndims + 2, y_dims, y_strides));
float alpha = 1.f, beta = 0.f;
CUDNN_CALL(cudnnPoolingForward(graph->GetDevice()->GetCuDNNHandle(), pooling_desc_,
&alpha, x_desc_, x_data, &beta, y_desc_, y_data));
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *x_data = x[0]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
float alpha = 1.f, beta = 1.f;
CUDNN_CALL(cudnnPoolingBackward(graph->GetDevice()->GetCuDNNHandle(), pooling_desc_,
&alpha, y_desc_, y_data, y_desc_, dEdY_data, x_desc_, x_data, &beta,
x_desc_, dEdX_data));
}
private:
Shape filter_shape_, strides_, padding_;
PoolingMode mode_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnTensorDescriptor_t x_desc_, y_desc_;
};
template<typename Dummy>
struct PoolingNodeFactory<Dummy, GPU> {
Node *Create(int node, const Shape &filter_shape, const Shape &strides, const Shape &padding, PoolingMode mode) {
return new PoolingNodeGPU(node, filter_shape, strides, padding, mode);
}
};
template struct PoolingNodeFactory<void, GPU>;
struct ReduceSumDesc {
int x_strides[kMaxTensorDim + 1], y_strides[kMaxTensorDim + 1];
};
struct Empty {};
static __global__ void ReduceSumBackwardKernel(int nelems, int size,
const float *dEdY, float *dEdX) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
dEdX[i] += dEdY[i / size];
}
}
class ReduceSumNodeGPU : public Node {
public:
ReduceSumNodeGPU(int node, int axis) : Node{ node }, axis_(axis) {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *x_data = (float*)x[0]->GetData();
float *y_data = (float*)y->GetData();
int size = x[0]->GetShape().GetSize();
int x_dims[kMaxTensorDim + 1], y_dims[kMaxTensorDim + 1];
int dims;
ReduceSumDesc desc;
if (axis_ == -1) {
dims = 2;
x_dims[0] = x[0]->GetBatchSize();
x_dims[1] = x[0]->GetShape().GetSize();
y_dims[0] = x[0]->GetBatchSize();
y_dims[1] = 1;
desc.y_strides[0] = 1;
desc.y_strides[1] = 0;
}
else {
dims = y->GetShape().GetRank() + 1;
GetTensorDims(x[0], x_dims);
GetTensorDims(y, y_dims);
GetTensorStrides(y, desc.y_strides);
}
int regular_total, reduce_total;
int regular_sizes[kMaxTensorDim + 1], reduce_sizes[kMaxTensorDim + 1];
int strides[kMaxTensorDim + 1];
GetReduceDims(dims, x_dims, y_dims, ®ular_total, &reduce_total,
regular_sizes, reduce_sizes, strides);
memcpy(desc.x_strides, strides, sizeof(strides));
auto transform_func = [=] __device__ (int index, Empty) {
return x_data[index];
};
auto store_func = [=] __device__ (int index, float value, Empty) {
int y_index = GetTensorStorageIndex(index, dims, desc.x_strides, desc.y_strides);
y_data[y_index] = value;
};
TransformReduce(transform_func, hipcub::Sum(), store_func, dims,
regular_total, regular_sizes, reduce_total, reduce_sizes, strides, Empty());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *dEdY_data = (float*)dEdY->GetData();
float *dEdX_data = (float*)dEdX[0]->GetData();
int size = x[0]->GetShape().GetSize();
int batch_size = x[0]->GetBatchSize();
int nelems = size * batch_size;
if (axis_ != -1)
REPORT_ERROR("Unsupported.");
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (nelems + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( ReduceSumBackwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
nelems, size, dEdY_data, dEdX_data);
}
private:
int axis_;
};
template<typename Dummy>
struct ReduceSumNodeFactory<Dummy, GPU> {
Node *Create(int node, int axis) {
return new ReduceSumNodeGPU(node, axis);
}
};
template struct ReduceSumNodeFactory<void, GPU>;
struct SliceDesc {
int elems[kMaxTensorDim + 1], strides[kMaxTensorDim + 1];
};
static __global__ void SliceForwardKernel(int count, int base_index, int ndims, SliceDesc desc,
const float *x, float *y) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < count) {
int index = base_index + GetTensorStorageIndex(i, ndims, desc.elems, desc.strides);
y[i] = x[index];
}
}
static __global__ void SliceBackwardKernel(int count, int base_index, int ndims, SliceDesc desc,
const float *dEdY, float *dEdX) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < count) {
int index = base_index + GetTensorStorageIndex(i, ndims, desc.elems, desc.strides);
dEdX[index] += dEdY[i];
}
}
class SliceNodeGPU : public Node {
public:
SliceNodeGPU(int node, const Shape &start, const Shape &size) : Node{ node }, start_(start), size_(size) {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
int ndims = x[0]->GetShape().GetRank() + 1;
GetTensorStrides(x[0], desc_.strides);
base_index_ = 0;
for (int i = 1; i < ndims; i++)
base_index_ += desc_.strides[i] * start_.GetDim(i - 1);
desc_.elems[ndims - 1] = 1;
for (int i = ndims - 2; i >= 0; i--)
desc_.elems[i] = desc_.elems[i + 1] * size_.GetDim(i);
count_ = desc_.elems[0] * x[0]->GetBatchSize();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (count_ + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( SliceForwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, count_, base_index_, ndims,
desc_, x[0]->GetData(), y->GetData());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
int ndims = x[0]->GetShape().GetRank() + 1;
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (count_ + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( SliceBackwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, count_, base_index_, ndims,
desc_, dEdY->GetData(), dEdX[0]->GetData());
}
private:
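// The slice geometry below is computed once in Forward() and reused by Backward(),
// hence the mutable members.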
Shape start_, size_;
mutable SliceDesc desc_;
mutable int count_, base_index_;
};
template<typename Dummy>
struct SliceNodeFactory<Dummy, GPU> {
Node *Create(int node, const Shape &start, const Shape &size) {
return new SliceNodeGPU(node, start, size);
}
};
template struct SliceNodeFactory<void, GPU>;
static __global__ void ConcatForwardKernel(int N, float **axis_bases,
int *higher_strides, float *y, int higher_stride, int axis_stride) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
int h = i / higher_stride;
int j = i % higher_stride;
int t = j / axis_stride;
int l = j % axis_stride;
y[i] = axis_bases[t][h * higher_strides[t] + l];
}
}
static __global__ void ConcatBackwardKernel(int N, float **axis_bases,
int *higher_strides, float *dEdY, int higher_stride, int axis_stride) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
int h = i / higher_stride;
int j = i % higher_stride;
int t = j / axis_stride;
int l = j % axis_stride;
axis_bases[t][h * higher_strides[t] + l] += dEdY[i];
}
}
class ConcatNodeGPU : public Node {
public:
ConcatNodeGPU(initializer_view<Expression> values, int axis) : Node(values), axis_(axis) {}
virtual void FreeMemory(Device *device) override {
if (axis_bases_)
device->FreeMemoryPinned(axis_bases_);
if (axis_bases_backward_)
device->FreeMemoryPinned(axis_bases_backward_);
if (higher_strides_)
device->FreeMemoryPinned(higher_strides_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Shape &y_shape = y->GetShape();
int N = y_shape.GetSize();
int axis_stride = y_shape.GetSizeRange(axis_ + 1, y_shape.GetRank());
int axis_total = y_shape.GetDim(axis_);
int higher_stride = axis_stride * axis_total;
int higher_size = y_shape.GetSizeRange(0, axis_);
axis_bases_ = (float**)graph->GetDevice()->AllocateMemoryPinned(sizeof(float*) * axis_total);
higher_strides_ = (int*)graph->GetDevice()->AllocateMemoryPinned(sizeof(int) * axis_total);
int k = 0;
for (size_t i = 0; i < x.size(); i++) {
int cur_axis_total = x[i]->GetShape().GetDim(axis_);
float *cur_data_base = x[i]->GetData();
for (int j = 0; j < cur_axis_total; j++) {
axis_bases_[k] = cur_data_base;
higher_strides_[k] = cur_axis_total * axis_stride;
cur_data_base += axis_stride;
k++;
}
}
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( ConcatForwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, N, axis_bases_, higher_strides_,
y->GetData(), higher_stride, axis_stride);
CUDA_CALL(hipDeviceSynchronize());
CUDA_CALL(hipGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const Shape &y_shape = y->GetShape();
int N = y_shape.GetSize();
int axis_stride = y_shape.GetSizeRange(axis_ + 1, y_shape.GetRank());
int axis_total = y_shape.GetDim(axis_);
int higher_stride = axis_stride * axis_total;
int higher_size = y_shape.GetSizeRange(0, axis_);
axis_bases_backward_ = (float**)graph->GetDevice()->AllocateMemoryPinned(sizeof(float*) * axis_total);
int k = 0;
for (size_t i = 0; i < x.size(); i++) {
int cur_axis_total = x[i]->GetShape().GetDim(axis_);
float *cur_data_base = dEdX[i]->GetData();
for (int j = 0; j < cur_axis_total; j++) {
axis_bases_backward_[k] = cur_data_base;
cur_data_base += axis_stride;
k++;
}
}
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( ConcatBackwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, N, axis_bases_backward_, higher_strides_,
dEdY->GetData(), higher_stride, axis_stride);
CUDA_CALL(hipGetLastError());
}
private:
int axis_;
mutable float **axis_bases_ = nullptr, **axis_bases_backward_ = nullptr;
mutable int *higher_strides_ = nullptr;
};
template<typename Dummy>
struct ConcatNodeFactory<Dummy, GPU> {
Node *Create(initializer_view<Expression> values, int axis) {
return new ConcatNodeGPU(values, axis);
}
};
template struct ConcatNodeFactory<void, GPU>;
static __global__ void DropoutForwardKernel(int n, float p, float mul_scale,
const float *probs, const float *x, float *y) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
if (probs[i] <= p)
y[i] = 0.f;
else
y[i] = x[i] * mul_scale;
}
}
static __global__ void DropoutBackwardKernel(int n, float p, float mul_scale,
const float *probs, const float *dEdY, float *dEdX) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
if (probs[i] > p)
dEdX[i] += dEdY[i] * mul_scale;
}
}
class DropoutNodeGPU : public Node {
public:
DropoutNodeGPU(int node, float p) : Node{ node }, p_(p) {}
virtual void FreeMemory(Device *device) {
device->FreeMemory(probs_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int size = x[0]->GetBatchSize() * x[0]->GetShape().GetSize();
probs_ = (float *)graph->GetDevice()->AllocateMemory(size * sizeof(float));
hiprandGenerator_t generator = graph->GetDevice()->GetCuRANDGenerator();
CURAND_CALL(hiprandGenerateUniform(generator, probs_, size));
float scale = 1.f / (1.f - p_);
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( DropoutForwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, size, p_, scale, probs_, x_data, y_data);
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
int size = x[0]->GetBatchSize() * x[0]->GetShape().GetSize();
float scale = 1.f - p_;
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( DropoutBackwardKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, size, p_, scale, probs_, dEdY_data, dEdX_data);
}
private:
float p_;
mutable float *probs_;
};
template<typename Dummy>
struct DropoutNodeFactory<Dummy, GPU> {
Node *Create(int node, float p) {
return new DropoutNodeGPU(node, p);
}
};
template struct DropoutNodeFactory<void, GPU>;
class SoftmaxNodeGPU : public Node {
public:
SoftmaxNodeGPU(int node) : Node{ node } {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
// y = exp(x_i) / sum(exp(x_i))
const Shape &input_shape = x[0]->GetShape();
int size = input_shape.GetSizeRange(0, input_shape.GetRank() - 1);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
float alpha = 1.f, beta = 0.f;
cudnnHandle_t cudnn_handle = graph->GetDevice()->GetCuDNNHandle();
cudnnTensorDescriptor_t tensor_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&tensor_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(tensor_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, size, dim_size, 1, 1));
CUDNN_CALL(cudnnSoftmaxForward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, tensor_desc, x_data, &beta, tensor_desc, y_data));
CUDNN_CALL(cudnnDestroyTensorDescriptor(tensor_desc));
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
// dY/dX_i = y_i*dEdy_i - y_i*sum_j{y_j*dEdy_j}
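// which follows from dy_j/dx_i = y_j * (delta_ij - y_i); cuDNN evaluates this fused expression below.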
const Shape &input_shape = x[0]->GetShape();
int size = x[0]->GetShape().GetSizeRange(0, input_shape.GetRank() - 1);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
float alpha = 1.f, beta = 1.f;
cudnnHandle_t cudnn_handle = graph->GetDevice()->GetCuDNNHandle();
cudnnTensorDescriptor_t tensor_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&tensor_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(tensor_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, size, dim_size, 1, 1));
CUDNN_CALL(cudnnSoftmaxBackward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, tensor_desc, y_data, tensor_desc, dEdY_data, &beta, tensor_desc, dEdX_data));
CUDNN_CALL(cudnnDestroyTensorDescriptor(tensor_desc));
}
};
template<typename Dummy>
struct SoftmaxNodeFactory<Dummy, GPU> {
Node *Create(int node) {
return new SoftmaxNodeGPU(node);
}
};
template struct SoftmaxNodeFactory<void, GPU>;
static __global__ void CrossEntropyForward(const float *x, float *y, const int *labels, int N, int dim_size) {
// y = -log(x_k)
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
int label = labels[i];
y[i] = -log(x[dim_size * i + label]);
}
}
static __global__ void CrossEntropyBackward(const float *x, const int *labels,
const float *dEdY, float *dEdX, int N, int dim_size) {
// dY/dX_k = -1/X_k
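// Combined with the chain rule this subtracts dEdY[i] / x_k at the label entry;
// all other entries receive no gradient from this loss.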
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
int label = labels[i];
dEdX[dim_size * i + label] -= dEdY[i] * (1.f / x[dim_size * i + label]);
}
}
class CrossEntropyNodeGPU : public Node {
public:
CrossEntropyNodeGPU(Graph *graph, int node, const std::vector<int> &labels) : Node{ node } {
int size = (int)labels.size() * sizeof(int);
labels_pinned_ = (int*)graph->GetDevice()->AllocateMemoryPinned(size);
memcpy(labels_pinned_, labels.data(), size);
labels_data_ = (int *)graph->GetDevice()->AllocateMemory(size);
CUDA_CALL(hipMemcpyAsync(labels_data_, labels_pinned_, size, hipMemcpyHostToDevice));
}
virtual void FreeMemory(Device *device) override {
device->FreeMemoryPinned(labels_pinned_);
device->FreeMemory(labels_data_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Shape &input_shape = x[0]->GetShape();
int size = input_shape.GetSizeRange(0, input_shape.GetRank() - 2);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( CrossEntropyForward), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, x_data, y_data, labels_data_, size, dim_size);
CUDA_CALL(hipGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const Shape &input_shape = x[0]->GetShape();
int size = input_shape.GetSizeRange(0, input_shape.GetRank() - 2);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *x_data = x[0]->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( CrossEntropyBackward), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
x_data, labels_data_, dEdY_data, dEdX_data, size, dim_size);
CUDA_CALL(hipGetLastError());
}
private:
int *labels_pinned_, *labels_data_;
};
template<typename Dummy>
struct CrossEntropyNodeFactory<Dummy, GPU> {
Node *Create(Graph *graph, int node, const std::vector<int> &labels) {
return new CrossEntropyNodeGPU(graph, node, labels);
}
};
template struct CrossEntropyNodeFactory<void, GPU>;
static __global__ void ClassificationAccuracyKernel(const float *input, const int *expected, float *output,
int batch_size, int size) {
int batch_id = blockDim.x * blockIdx.x + threadIdx.x;
if (batch_id < batch_size) {
int max_index = 0;
float max_value = input[batch_id * size];
for (int i = 1; i < size; i++) {
float current = input[batch_id * size + i];
if (current > max_value) {
max_value = current;
max_index = i;
}
}
if (max_index == expected[batch_id])
output[batch_id] = 1.f;
else
output[batch_id] = 0.f;
}
}
class ClassificationAccuracyNodeGPU : public Node {
public:
ClassificationAccuracyNodeGPU(Graph *graph, int node, const std::vector<int> &labels) : Node{ node } {
int size = (int)labels.size() * sizeof(int);
// The labels stay in page-locked (pinned) host memory and are read by the kernel via zero-copy access, since they are only needed once
labels_pinned_ = (int*)graph->GetDevice()->AllocateMemoryPinned(size);
memcpy(labels_pinned_, labels.data(), size);
}
virtual void FreeMemory(Device *device) override {
device->FreeMemoryPinned(labels_pinned_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Shape &input_shape = x[0]->GetShape();
int size = input_shape.GetSizeRange(0, input_shape.GetRank() - 2);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( ClassificationAccuracyKernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
x_data, labels_pinned_, y_data, size, dim_size);
CUDA_CALL(hipGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
REPORT_ERROR("Backward propagation is unsupported for this expression.");
}
private:
int *labels_pinned_;
};
template<typename Dummy>
struct ClassificationAccuracyNodeFactory<Dummy, GPU> {
Node *Create(Graph *graph, int node, const std::vector<int> &labels) {
return new ClassificationAccuracyNodeGPU(graph, node, labels);
}
};
template struct ClassificationAccuracyNodeFactory<void, GPU>;
| 3f09be6e6cb26ebf366aa08d8631ac30505834ba.cu | #include "Device.h"
#include "Graph.h"
#include "Expression.h"
#include "Expression_p.h"
#include "Node.h"
#include "Utils.h"
#include <type_traits>
#include <cudnn.h>
#include <cub/block/block_reduce.cuh>
#include <curand.h>
#include <cusparse_v2.h>
static const int kThreadsPerBlock = 128;
static const int kMaxThreadsPerBlock = 512;
static inline __device__ int GetTensorStorageIndex(int logical_index, int ndims, const int *elems, const int *strides) {
int ret = 0;
for (int i = 0; i < ndims; i++) {
int cur = logical_index / elems[i];
ret += strides[i] * cur;
logical_index %= elems[i];
}
return ret;
}
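// Worked example for GetTensorStorageIndex (hypothetical 3-d tensor): with
// elems = {12, 4, 1} and strides = {12, 0, 1} (dimension 1 broadcast away),
// logical_index 17 decomposes as 17 = 1*12 + 1*4 + 1*1, so the storage index
// is 1*12 + 1*0 + 1*1 = 13.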
struct ReduceDesc {
int regular_sizes[kMaxTensorDim + 1], reduce_sizes[kMaxTensorDim + 1];
int strides[kMaxTensorDim + 1];
};
// Reduction modes:
// 0. No reduction : each thread handles one output element from one input element.
// 1. Small reduction : reduction size is less than kSmallReducesInBlock, each thread does one reduction. (TODO)
// 2. Medium reduction : reduction size is less than kMaxThreadsPerBlock, each warp does one reduction (TODO)
// 3. Large reduction : reduction size is less than kMaxThreadsPerBlock * kMaxReducePerThread,
// each thread block does one reduction.
// 4. Huge reduction : reduction size is larger than kMaxThreadsPerBlock * kMaxReducePerThread,
// the reduction is distributed across several blocks. (TODO, kMaxReducePerThread is currently set to 2147483647).
//static const int kSmallReducesInBlock = 32;
//static const int kMaxSmallReductionSize = kMaxThreadsPerBlock / kSmallReducesInBlock;
static const int kMaxReducePerThread = 2147483647;
template<typename TransformFunc, typename StoreFunc, typename ExtraData>
static __global__ void TransformReduceKernel(TransformFunc transform_func, StoreFunc store_func,
int dims, int regular_total, ExtraData extra_data) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < regular_total) {
float value = transform_func(index, extra_data);
store_func(index, value, extra_data);
}
}
template<int ndims, typename TransformFunc, typename ReduceFunc, typename StoreFunc, typename ExtraData>
static __global__ void TransformReduceKernel(TransformFunc transform_func, ReduceFunc reduce_func, StoreFunc store_func,
int regular_total, int reduce_total, ReduceDesc reduce_desc, int reduces_per_thread, ExtraData extra_data) {
typedef cub::BlockReduce<float, kMaxThreadsPerBlock> BlockReduceT;
__shared__ typename BlockReduceT::TempStorage temp_storage;
int regular_idx = blockIdx.x;
int reduce_idx_base = threadIdx.x;
int base_idx = GetTensorStorageIndex(regular_idx, ndims, reduce_desc.regular_sizes, reduce_desc.strides);
// First element
int index = base_idx + GetTensorStorageIndex(reduce_idx_base, ndims, reduce_desc.reduce_sizes, reduce_desc.strides);
float value = transform_func(index, extra_data);
int reduce_idx = reduce_idx_base;
for (int i = 1; i < reduces_per_thread; i++) {
reduce_idx += blockDim.x;
if (reduce_idx < reduce_total) {
int index = base_idx + GetTensorStorageIndex(reduce_idx, ndims, reduce_desc.reduce_sizes, reduce_desc.strides);
float cur_value = transform_func(index, extra_data);
// Reduce element
value = reduce_func(value, cur_value);
}
}
float result = BlockReduceT(temp_storage).Reduce(value, reduce_func, reduce_total);
if (threadIdx.x == 0)
store_func(base_idx, result, extra_data);
}
static void GetReduceDims(int dims, const int *from_dims, const int *to_dims,
int *regular_total, int *reduce_total,
int regular_sizes[kMaxTensorDim + 1], int reduce_sizes[kMaxTensorDim + 1], int strides[kMaxTensorDim + 1]) {
int regular_tot = 1, reduce_tot = 1;
int tot = 1;
for (int i = dims - 1; i >= 0; i--) {
int from_dim = from_dims[i], to_dim = to_dims[i];
strides[i] = tot;
regular_sizes[i] = regular_tot;
reduce_sizes[i] = reduce_tot;
tot *= from_dim;
if (from_dim == to_dim) {
// Regular dimension
regular_tot *= from_dim;
}
else if (to_dim == 1) {
// Reduce dimension
reduce_tot *= from_dim;
}
else // Invalid reduction operation
DEBUG_BREAK();
}
*regular_total = regular_tot;
*reduce_total = reduce_tot;
}
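// Minimal usage sketch for GetReduceDims (hypothetical shapes, host-side only):
//   int from[3] = {2, 3, 4}, to[3] = {2, 1, 4};
//   int reg_tot, red_tot;
//   int reg[kMaxTensorDim + 1], red[kMaxTensorDim + 1], str[kMaxTensorDim + 1];
//   GetReduceDims(3, from, to, &reg_tot, &red_tot, reg, red, str);
// This yields str = {12, 4, 1}, reg_tot = 8 and red_tot = 3: dimension 1 is
// summed away while dimensions 0 and 2 are kept.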
template<typename TransformFunc, typename ReduceFunc, typename StoreFunc, typename ExtraData>
static void TransformReduce(TransformFunc transform_func, ReduceFunc reduce_func, StoreFunc store_func,
int dims, int regular_total, int regular_sizes[kMaxTensorDim + 1],
int reduce_total, int reduce_sizes[kMaxTensorDim + 1], int strides[kMaxTensorDim + 1],
const ExtraData &extra_data) {
ReduceDesc desc;
memcpy(&desc.regular_sizes, regular_sizes, sizeof(desc.regular_sizes));
memcpy(&desc.reduce_sizes, reduce_sizes, sizeof(desc.reduce_sizes));
memcpy(&desc.strides, strides, sizeof(desc.strides));
if (reduce_total == 1) {
// 0. No reduction
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (regular_total + threadsPerBlock - 1) / threadsPerBlock;
TransformReduceKernel<<<blocksPerGrid, threadsPerBlock>>>(transform_func, store_func,
dims, regular_total, extra_data);
}
else {
// 3. Large reduction
int reduces_per_thread = (reduce_total + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
if (reduces_per_thread > kMaxReducePerThread)
DEBUG_BREAK(); // TODO
int blocksPerGrid = regular_total;
int threadsPerBlock;
if (reduce_total < kMaxThreadsPerBlock)
threadsPerBlock = reduce_total;
else
threadsPerBlock = kMaxThreadsPerBlock;
switch (dims) {
case 1: TransformReduceKernel<1><<<blocksPerGrid, threadsPerBlock>>>(
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 2: TransformReduceKernel<2><<<blocksPerGrid, threadsPerBlock>>>(
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 3: TransformReduceKernel<3><<<blocksPerGrid, threadsPerBlock>>>(
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 4: TransformReduceKernel<4><<<blocksPerGrid, threadsPerBlock>>>(
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 5: TransformReduceKernel<5><<<blocksPerGrid, threadsPerBlock>>>(
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 6: TransformReduceKernel<6><<<blocksPerGrid, threadsPerBlock>>>(
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 7: TransformReduceKernel<7><<<blocksPerGrid, threadsPerBlock>>>(
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
case 8: TransformReduceKernel<8><<<blocksPerGrid, threadsPerBlock>>>(
transform_func, reduce_func, store_func,
regular_total, reduce_total, desc, reduces_per_thread, extra_data); break;
default:
static_assert(8 == kMaxTensorDim + 1, "");
DEBUG_BREAK();
}
}
}
static __global__ void LookupForwardKernel(int total, int emb_size, const int *indices,
const float *x, float *y) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < total) {
int j = i / emb_size;
int k = i % emb_size;
y[i] = x[indices[j] * emb_size + k];
}
}
static __global__ void LookupBackwardKernel(int total, int emb_size, const int *indices,
const float *dEdY, float *dEdX) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < total) {
int j = i / emb_size;
int k = i % emb_size;
// TODO: Use a proper reduction mechanism, and try to make the reduction deterministic.
atomicAdd(&dEdX[indices[j] * emb_size + k], dEdY[i]);
}
}
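// Worked example for the lookup kernels (hypothetical sizes): with emb_size = 3
// and indices = {2, 0}, thread i = 4 computes j = 1, k = 1 and copies
// x[0*3 + 1] into y[4], i.e. the second component of embedding row 0 into the
// second output row; the backward kernel scatters gradients back along the
// same mapping with atomicAdd.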
class LookupNodeGPU : public Node {
public:
LookupNodeGPU(Graph *graph, int embeddings, int batch_size, const Shape &shape, const int *indices)
: Node{ embeddings }, batch_size_(batch_size), shape_(shape) {
int size = batch_size * shape.GetSize() * sizeof(int);
indices_pinned_ = (int*)graph->GetDevice()->AllocateMemoryPinned(size);
memcpy(indices_pinned_, indices, size);
indices_ = (int *)graph->GetDevice()->AllocateMemory(size);
CUDA_CALL(cudaMemcpyAsync(indices_, indices_pinned_, size, cudaMemcpyHostToDevice));
}
virtual void FreeMemory(Device *device) {
device->FreeMemoryPinned(indices_pinned_);
device->FreeMemory(indices_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int total = y->GetBatchSize() * y->GetShape().GetSize();
int emb_size = y->GetShape().GetDim(1);
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (total + threadsPerBlock - 1) / threadsPerBlock;
LookupForwardKernel<<<blocksPerGrid, threadsPerBlock>>>(total, emb_size, indices_,
x_data, y_data);
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
int total = y->GetBatchSize() * y->GetShape().GetSize();
int emb_size = y->GetShape().GetDim(1);
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (total + threadsPerBlock - 1) / threadsPerBlock;
LookupBackwardKernel<<<blocksPerGrid, threadsPerBlock>>>(total, emb_size, indices_,
dEdY_data, dEdX_data);
}
private:
int batch_size_;
Shape shape_;
int *indices_pinned_, *indices_;
};
template<typename Dummy>
struct LookupNodeFactory<Dummy, GPU> {
Node *Create(Graph *graph, int embeddings, int batch_size, const Shape &shape, const int *indices) {
return new LookupNodeGPU(graph, embeddings, batch_size, shape, indices);
}
};
template struct LookupNodeFactory<void, GPU>;
struct BinaryForwardDims {
int elems[kMaxTensorDim + 1];
int lhs_strides[kMaxTensorDim + 1], rhs_strides[kMaxTensorDim + 1];
};
template<typename ForwardFunc, int ndims>
static __global__ void BinaryForwardKernel(const float *lhs, const float *rhs, float *y,
int nelems, BinaryForwardDims forward) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
int lhs_index = GetTensorStorageIndex(i, ndims, forward.elems, forward.lhs_strides);
int rhs_index = GetTensorStorageIndex(i, ndims, forward.elems, forward.rhs_strides);
y[i] = ForwardFunc()(lhs[lhs_index], rhs[rhs_index]);
}
}
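// Broadcasting is expressed purely through the stride tables: assuming
// GetTensorStrides assigns stride 0 to broadcast dimensions, a hypothetical
// rhs of shape {1, 4} added to an lhs of shape {3, 4} gets
// rhs_strides = {0, 1}, so all three output rows read the same four rhs
// elements while lhs is traversed normally.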
template<typename ForwardFunc>
static void BinaryForwardKernelWrapper(const float *lhs, const float *rhs, float *y,
int nelems, int ndims, BinaryForwardDims forward) {
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (nelems + threadsPerBlock - 1) / threadsPerBlock;
switch (ndims) {
case 1: BinaryForwardKernel<ForwardFunc, 1><<<blocksPerGrid, threadsPerBlock>>>(
lhs, rhs, y, nelems, forward); break;
case 2: BinaryForwardKernel<ForwardFunc, 2><<<blocksPerGrid, threadsPerBlock>>>(
lhs, rhs, y, nelems, forward); break;
case 3: BinaryForwardKernel<ForwardFunc, 3><<<blocksPerGrid, threadsPerBlock>>>(
lhs, rhs, y, nelems, forward); break;
case 4: BinaryForwardKernel<ForwardFunc, 4><<<blocksPerGrid, threadsPerBlock>>>(
lhs, rhs, y, nelems, forward); break;
case 5: BinaryForwardKernel<ForwardFunc, 5><<<blocksPerGrid, threadsPerBlock>>>(
lhs, rhs, y, nelems, forward); break;
case 6: BinaryForwardKernel<ForwardFunc, 6><<<blocksPerGrid, threadsPerBlock>>>(
lhs, rhs, y, nelems, forward); break;
case 7: BinaryForwardKernel<ForwardFunc, 7><<<blocksPerGrid, threadsPerBlock>>>(
lhs, rhs, y, nelems, forward); break;
case 8: BinaryForwardKernel<ForwardFunc, 8><<<blocksPerGrid, threadsPerBlock>>>(
lhs, rhs, y, nelems, forward); break;
default:
static_assert(8 == kMaxTensorDim + 1, "");
DEBUG_BREAK();
}
CUDA_CALL(cudaGetLastError());
}
struct BinaryReduceDesc {
int lhs_strides[kMaxTensorDim + 1], rhs_strides[kMaxTensorDim + 1];
int strides[kMaxTensorDim + 1];
};
template<typename ForwardFunc, typename BackwardFunc>
class BinaryOpNodeGPU : public Node {
public:
BinaryOpNodeGPU(int lhs_node, int rhs_node) : Node{ lhs_node, rhs_node } {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *lhs_data = x[0]->GetData(), *rhs_data = x[1]->GetData();
int size = y->GetShape().GetSize();
float *y_data = y->GetData();
int y_batch_size = y->GetBatchSize();
const Shape &y_shape = y->GetShape();
int nelems = y_batch_size * y_shape.GetSize();
int ndims = 1 + y_shape.GetRank();
BinaryForwardDims forward;
forward.elems[ndims - 1] = 1;
for (int i = ndims - 2; i >= 0; i--)
forward.elems[i] = forward.elems[i + 1] * y_shape.GetDim(i);
GetTensorStrides(x[0], forward.lhs_strides);
GetTensorStrides(x[1], forward.rhs_strides);
BinaryForwardKernelWrapper<ForwardFunc>(
lhs_data, rhs_data, y_data, nelems, ndims, forward);
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
if (std::is_same<BackwardFunc, BinaryNoBackward>::value)
REPORT_ERROR("Backward propagation is unsupported for this expression.");
const float *lhs_data = x[0]->GetData(), *rhs_data = x[1]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdL_data = dEdX[0]->GetData(), *dEdR_data = dEdX[1]->GetData();
const Shape &lhs_shape = x[0]->GetShape(), &rhs_shape = x[1]->GetShape();
const Shape &y_shape = y->GetShape();
int ndims = 1 + y_shape.GetRank();
int lhs_dims[kMaxTensorDim + 1], rhs_dims[kMaxTensorDim + 1];
int y_dims[kMaxTensorDim + 1];
GetTensorDims(x[0], lhs_dims);
GetTensorDims(x[1], rhs_dims);
GetTensorDims(y, y_dims);
int lhs_strides[kMaxTensorDim + 1], rhs_strides[kMaxTensorDim + 1];
GetTensorStrides(x[0], lhs_strides);
GetTensorStrides(x[1], rhs_strides);
int regular_total, reduce_total;
int regular_sizes[kMaxTensorDim + 1], reduce_sizes[kMaxTensorDim + 1];
int strides[kMaxTensorDim + 1];
BinaryReduceDesc desc;
/* LHS */
{
GetReduceDims(ndims, y_dims, lhs_dims,
			&regular_total, &reduce_total, regular_sizes, reduce_sizes, strides);
memcpy(&desc.lhs_strides, lhs_strides, sizeof(desc.lhs_strides));
memcpy(&desc.rhs_strides, rhs_strides, sizeof(desc.rhs_strides));
memcpy(&desc.strides, strides, sizeof(desc.strides));
auto transform_func = [=] __device__(int index, const BinaryReduceDesc &desc) {
int lhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.lhs_strides);
int rhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.rhs_strides);
float dYdL_value, dYdR_value;
BackwardFunc()(lhs_data[lhs_index], rhs_data[rhs_index], y_data[index], &dYdL_value, &dYdR_value);
return dEdY_data[index] * dYdL_value;
};
auto store_func = [=] __device__(int index, float result, const BinaryReduceDesc &desc) {
int lhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.lhs_strides);
dEdL_data[lhs_index] += result;
};
TransformReduce(transform_func, cub::Sum(), store_func,
ndims, regular_total, regular_sizes, reduce_total, reduce_sizes, strides, desc);
}
/* RHS */
{
GetReduceDims(ndims, y_dims, rhs_dims,
			&regular_total, &reduce_total, regular_sizes, reduce_sizes, strides);
auto transform_func = [=] __device__(int index, const BinaryReduceDesc &desc) {
int lhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.lhs_strides);
int rhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.rhs_strides);
float dYdL_value, dYdR_value;
BackwardFunc()(lhs_data[lhs_index], rhs_data[rhs_index], y_data[index], &dYdL_value, &dYdR_value);
return dEdY_data[index] * dYdR_value;
};
auto store_func = [=] __device__(int index, float result, const BinaryReduceDesc &desc) {
int rhs_index = GetTensorStorageIndex(index, ndims, desc.strides, desc.rhs_strides);
dEdR_data[rhs_index] += result;
};
TransformReduce(transform_func, cub::Sum(), store_func,
ndims, regular_total, regular_sizes, reduce_total, reduce_sizes, strides, desc);
}
}
};
template<typename ForwardFunc, typename BackwardFunc>
struct BinaryOpNodeFactory<GPU, ForwardFunc, BackwardFunc> {
Node *Create(int lhs_node, int rhs_node) {
return new BinaryOpNodeGPU<ForwardFunc, BackwardFunc>(lhs_node, rhs_node);
}
};
template<typename ForwardFunc>
static __global__ void BinaryLeftScalarForwardKernel(float lhs, const float *rhs, float *y, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
y[i] = ForwardFunc()(lhs, rhs[i]);
}
template<typename BackwardFunc>
static __global__ void BinaryLeftScalarBackwardKernel(float lhs, const float *rhs, const float *y,
const float *dEdY, float *dEdR, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
float dYdL, dYdR;
BackwardFunc()(lhs, rhs[i], y[i], &dYdL, &dYdR);
dEdR[i] = dEdY[i] * dYdR;
}
}
template<typename ForwardFunc, typename BackwardFunc>
class BinaryLeftScalarOpNodeGPU : public Node {
public:
BinaryLeftScalarOpNodeGPU(float lhs_scalar, int rhs_node) : Node{ rhs_node }, lhs_scalar_(lhs_scalar) {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *rhs_data = x[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
BinaryLeftScalarForwardKernel<ForwardFunc><<<blocksPerGrid, threadsPerBlock>>>(
lhs_scalar_, rhs_data, y_data, size);
CUDA_CALL(cudaGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
if (std::is_same<BackwardFunc, BinaryNoBackward>::value)
REPORT_ERROR("Backward propagation is unsupported for this expression.");
const float *rhs_data = x[0]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdR_data = dEdX[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
BinaryLeftScalarBackwardKernel<BackwardFunc><<<blocksPerGrid, threadsPerBlock>>>(
lhs_scalar_, rhs_data, y_data, dEdY_data, dEdR_data, size);
CUDA_CALL(cudaGetLastError());
}
private:
float lhs_scalar_;
};
template<typename ForwardFunc, typename BackwardFunc>
struct BinaryLeftScalarOpNodeFactory<GPU, ForwardFunc, BackwardFunc> {
Node *Create(float lhs_scalar, int rhs_node) {
return new BinaryLeftScalarOpNodeGPU<ForwardFunc, BackwardFunc>(lhs_scalar, rhs_node);
}
};
template<typename ForwardFunc>
static __global__ void BinaryRightScalarForwardKernel(const float *lhs, float rhs, float *y, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
y[i] = ForwardFunc()(lhs[i], rhs);
}
template<typename BackwardFunc>
static __global__ void BinaryRightScalarBackwardKernel(const float *lhs, float rhs, const float *y,
const float *dEdY, float *dEdL, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
float dYdL, dYdR;
BackwardFunc()(lhs[i], rhs, y[i], &dYdL, &dYdR);
dEdL[i] = dEdY[i] * dYdL;
}
}
template<typename ForwardFunc, typename BackwardFunc>
class BinaryRightScalarOpNodeGPU : public Node {
public:
BinaryRightScalarOpNodeGPU(int lhs_node, float rhs_scalar) : Node{ lhs_node }, rhs_scalar_(rhs_scalar) {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *lhs_data = x[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
BinaryRightScalarForwardKernel<ForwardFunc><<<blocksPerGrid, threadsPerBlock>>>(
lhs_data, rhs_scalar_, y_data, size);
CUDA_CALL(cudaGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
if (std::is_same<BackwardFunc, BinaryNoBackward>::value)
REPORT_ERROR("Backward propagation is unsupported for this expression.");
const float *lhs_data = x[0]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdL_data = dEdX[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
BinaryRightScalarBackwardKernel<BackwardFunc><<<blocksPerGrid, threadsPerBlock>>>(
lhs_data, rhs_scalar_, y_data, dEdY_data, dEdL_data, size);
CUDA_CALL(cudaGetLastError());
}
private:
float rhs_scalar_;
};
template<typename ForwardFunc, typename BackwardFunc>
struct BinaryRightScalarOpNodeFactory<GPU, ForwardFunc, BackwardFunc> {
Node *Create(int lhs_node, float rhs_scalar) {
return new BinaryRightScalarOpNodeGPU<ForwardFunc, BackwardFunc>(lhs_node, rhs_scalar);
}
};
template<typename ForwardFunc>
static __global__ void UnaryForwardKernel(const float *x, float *y, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
y[i] = ForwardFunc()(x[i]);
}
template<typename BackwardFunc>
static __global__ void UnaryBackwardKernel(const float *x, const float *y,
const float *dEdY, float *dEdX, int N) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
float dYdX;
BackwardFunc()(x[i], y[i], &dYdX);
dEdX[i] = dEdY[i] * dYdX;
}
}
template<typename ForwardFunc, typename BackwardFunc>
class UnaryOpNodeGPU : public Node {
public:
UnaryOpNodeGPU(int node) : Node{ node } {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *x_data = x[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
UnaryForwardKernel<ForwardFunc><<<blocksPerGrid, threadsPerBlock>>>(x_data, y_data, size);
CUDA_CALL(cudaGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *x_data = x[0]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
int size = y->GetShape().GetSize() * x[0]->GetBatchSize();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + kThreadsPerBlock - 1) / kThreadsPerBlock;
UnaryBackwardKernel<BackwardFunc><<<blocksPerGrid, threadsPerBlock>>>(
x_data, y_data, dEdY_data, dEdX_data, size);
CUDA_CALL(cudaGetLastError());
}
};
template<typename ForwardFunc, typename BackwardFunc>
struct UnaryOpNodeFactory<GPU, ForwardFunc, BackwardFunc> {
Node *Create(int node) {
return new UnaryOpNodeGPU<ForwardFunc, BackwardFunc>(node);
}
};
INSTANTIATE_BINARY_OPS(GPU)
INSTANTIATE_BINARY_LEFT_SCALAR_OPS(GPU)
INSTANTIATE_BINARY_RIGHT_SCALAR_OPS(GPU)
INSTANTIATE_UNARY_OPS(GPU)
class SparseDotNodeGPU : public Node {
public:
SparseDotNodeGPU(int lhs, int rhs) : Node{ lhs, rhs } {
CUSPARSE_CALL(cusparseCreateMatDescr(&mat_desc_));
CUSPARSE_CALL(cusparseSetMatType(mat_desc_, CUSPARSE_MATRIX_TYPE_GENERAL));
CUSPARSE_CALL(cusparseSetMatIndexBase(mat_desc_, CUSPARSE_INDEX_BASE_ZERO));
}
virtual ~SparseDotNodeGPU() {
CUSPARSE_CALL(cusparseDestroyMatDescr(mat_desc_));
}
virtual int GetFlags() const override {
return NoAllocateBackwardOutput;
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Tensor *lhs = x[0], *rhs = x[1];
float alpha = 1.f, beta = 0.f;
CUSPARSE_CALL(cusparseScsrmv(graph->GetDevice()->GetCuSPARSEHandle(), CUSPARSE_OPERATION_NON_TRANSPOSE,
lhs->GetBatchSize(), lhs->GetShape().GetDim(0), lhs->GetNonZeroCount(),
&alpha, mat_desc_, lhs->GetSparseData(), lhs->GetSparseRowIndices(), lhs->GetSparseColumnIndices(),
rhs->GetData(), &beta, y->GetData()));
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const Tensor *lhs = x[0], *rhs = x[1];
Tensor *dEdL = dEdX[0], *dEdR = dEdX[1];
AllocateClearTensor(graph, dEdR);
// dEdL += dEdY * R'
// dEdR += L' * dEdY
float alpha = 1.f, beta = 1.f;
// dEdL not implemented for now.
CUSPARSE_CALL(cusparseScsrmv(graph->GetDevice()->GetCuSPARSEHandle(), CUSPARSE_OPERATION_TRANSPOSE,
lhs->GetBatchSize(), lhs->GetShape().GetDim(0), lhs->GetNonZeroCount(),
&alpha, mat_desc_, lhs->GetSparseData(), lhs->GetSparseRowIndices(), lhs->GetSparseColumnIndices(),
dEdY->GetData(), &beta, dEdR->GetData()));
}
private:
cusparseMatDescr_t mat_desc_;
};
template<typename Dummy>
struct SparseDotNodeFactory<Dummy, GPU> {
Node *Create(int lhs_node, int rhs_node) {
return new SparseDotNodeGPU(lhs_node, rhs_node);
}
};
template struct SparseDotNodeFactory<void, GPU>;
class PoolingNodeGPU : public Node {
public:
PoolingNodeGPU(int node, const Shape &filter_shape, const Shape &strides, const Shape &padding, PoolingMode mode)
: Node{ node }, filter_shape_(filter_shape), strides_(strides), padding_(padding), mode_(mode) {
CUDNN_CALL(cudnnCreatePoolingDescriptor(&pooling_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&x_desc_));
CUDNN_CALL(cudnnCreateTensorDescriptor(&y_desc_));
}
virtual ~PoolingNodeGPU() {
CUDNN_CALL(cudnnDestroyPoolingDescriptor(pooling_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(x_desc_));
CUDNN_CALL(cudnnDestroyTensorDescriptor(y_desc_));
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Shape &x_shape = x[0]->GetShape();
const Shape &y_shape = y->GetShape();
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int ndims = y->GetShape().GetRank() - 1;
int x_dims[CUDNN_DIM_MAX], y_dims[CUDNN_DIM_MAX];
x_dims[0] = x[0]->GetBatchSize();
y_dims[0] = y->GetBatchSize();
for (int i = 0; i < ndims + 1; i++) {
x_dims[i + 1] = x_shape.GetDim(i);
y_dims[i + 1] = y_shape.GetDim(i);
}
int x_strides[CUDNN_DIM_MAX], y_strides[CUDNN_DIM_MAX];
x_strides[ndims + 1] = 1;
y_strides[ndims + 1] = 1;
for (int i = ndims; i >= 0; i--) {
x_strides[i] = x_dims[i + 1] * x_strides[i + 1];
y_strides[i] = y_dims[i + 1] * y_strides[i + 1];
}
cudnnPoolingMode_t pooling_mode;
if (mode_ == PoolingMode::MaxPooling)
pooling_mode = CUDNN_POOLING_MAX;
else if (mode_ == PoolingMode::AvgPooling)
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING;
else if (mode_ == PoolingMode::AvgPoolingWithPadding)
pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING;
else
DEBUG_BREAK();
CUDNN_CALL(cudnnSetPoolingNdDescriptor(pooling_desc_, pooling_mode, CUDNN_PROPAGATE_NAN,
ndims, filter_shape_.data(), padding_.data(), strides_.data()));
CUDNN_CALL(cudnnSetTensorNdDescriptor(x_desc_, CUDNN_DATA_FLOAT, ndims + 2, x_dims, x_strides));
CUDNN_CALL(cudnnSetTensorNdDescriptor(y_desc_, CUDNN_DATA_FLOAT, ndims + 2, y_dims, y_strides));
float alpha = 1.f, beta = 0.f;
CUDNN_CALL(cudnnPoolingForward(graph->GetDevice()->GetCuDNNHandle(), pooling_desc_,
&alpha, x_desc_, x_data, &beta, y_desc_, y_data));
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *x_data = x[0]->GetData();
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
float alpha = 1.f, beta = 1.f;
CUDNN_CALL(cudnnPoolingBackward(graph->GetDevice()->GetCuDNNHandle(), pooling_desc_,
&alpha, y_desc_, y_data, y_desc_, dEdY_data, x_desc_, x_data, &beta,
x_desc_, dEdX_data));
}
private:
Shape filter_shape_, strides_, padding_;
PoolingMode mode_;
cudnnPoolingDescriptor_t pooling_desc_;
cudnnTensorDescriptor_t x_desc_, y_desc_;
};
template<typename Dummy>
struct PoolingNodeFactory<Dummy, GPU> {
Node *Create(int node, const Shape &filter_shape, const Shape &strides, const Shape &padding, PoolingMode mode) {
return new PoolingNodeGPU(node, filter_shape, strides, padding, mode);
}
};
template struct PoolingNodeFactory<void, GPU>;
struct ReduceSumDesc {
int x_strides[kMaxTensorDim + 1], y_strides[kMaxTensorDim + 1];
};
struct Empty {};
static __global__ void ReduceSumBackwardKernel(int nelems, int size,
const float *dEdY, float *dEdX) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < nelems) {
dEdX[i] += dEdY[i / size];
}
}
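// The gradient of a full sum-reduction is a broadcast: every input element
// that contributed to y[j] receives dEdY[j] unchanged, since
// d(sum_i x_i)/dx_k = 1 for every k.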
class ReduceSumNodeGPU : public Node {
public:
ReduceSumNodeGPU(int node, int axis) : Node{ node }, axis_(axis) {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *x_data = (float*)x[0]->GetData();
float *y_data = (float*)y->GetData();
int size = x[0]->GetShape().GetSize();
int x_dims[kMaxTensorDim + 1], y_dims[kMaxTensorDim + 1];
int dims;
ReduceSumDesc desc;
if (axis_ == -1) {
dims = 2;
x_dims[0] = x[0]->GetBatchSize();
x_dims[1] = x[0]->GetShape().GetSize();
y_dims[0] = x[0]->GetBatchSize();
y_dims[1] = 1;
desc.y_strides[0] = 1;
desc.y_strides[1] = 0;
}
else {
dims = y->GetShape().GetRank() + 1;
GetTensorDims(x[0], x_dims);
GetTensorDims(y, y_dims);
GetTensorStrides(y, desc.y_strides);
}
int regular_total, reduce_total;
int regular_sizes[kMaxTensorDim + 1], reduce_sizes[kMaxTensorDim + 1];
int strides[kMaxTensorDim + 1];
		GetReduceDims(dims, x_dims, y_dims, &regular_total, &reduce_total,
regular_sizes, reduce_sizes, strides);
memcpy(desc.x_strides, strides, sizeof(strides));
auto transform_func = [=] __device__ (int index, Empty) {
return x_data[index];
};
auto store_func = [=] __device__ (int index, float value, Empty) {
int y_index = GetTensorStorageIndex(index, dims, desc.x_strides, desc.y_strides);
y_data[y_index] = value;
};
TransformReduce(transform_func, cub::Sum(), store_func, dims,
regular_total, regular_sizes, reduce_total, reduce_sizes, strides, Empty());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *dEdY_data = (float*)dEdY->GetData();
float *dEdX_data = (float*)dEdX[0]->GetData();
int size = x[0]->GetShape().GetSize();
int batch_size = x[0]->GetBatchSize();
int nelems = size * batch_size;
if (axis_ != -1)
REPORT_ERROR("Unsupported.");
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (nelems + threadsPerBlock - 1) / threadsPerBlock;
ReduceSumBackwardKernel<<<blocksPerGrid, threadsPerBlock>>>(
nelems, size, dEdY_data, dEdX_data);
}
private:
int axis_;
};
template<typename Dummy>
struct ReduceSumNodeFactory<Dummy, GPU> {
Node *Create(int node, int axis) {
return new ReduceSumNodeGPU(node, axis);
}
};
template struct ReduceSumNodeFactory<void, GPU>;
struct SliceDesc {
int elems[kMaxTensorDim + 1], strides[kMaxTensorDim + 1];
};
static __global__ void SliceForwardKernel(int count, int base_index, int ndims, SliceDesc desc,
const float *x, float *y) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < count) {
int index = base_index + GetTensorStorageIndex(i, ndims, desc.elems, desc.strides);
y[i] = x[index];
}
}
static __global__ void SliceBackwardKernel(int count, int base_index, int ndims, SliceDesc desc,
const float *dEdY, float *dEdX) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < count) {
int index = base_index + GetTensorStorageIndex(i, ndims, desc.elems, desc.strides);
dEdX[index] += dEdY[i];
}
}
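// Worked example for the slice kernels (hypothetical shapes, batch size 1):
// slicing a {4, 5} input at start {1, 2} with size {2, 3} gives
// strides = {20, 5, 1}, base_index = 1*5 + 2*1 = 7 and desc.elems = {6, 3, 1};
// output element i = 4 then maps to input index 7 + 0*20 + 1*5 + 1*1 = 13,
// i.e. row 2, column 3 of the input.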
class SliceNodeGPU : public Node {
public:
SliceNodeGPU(int node, const Shape &start, const Shape &size) : Node{ node }, start_(start), size_(size) {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
int ndims = x[0]->GetShape().GetRank() + 1;
GetTensorStrides(x[0], desc_.strides);
base_index_ = 0;
for (int i = 1; i < ndims; i++)
base_index_ += desc_.strides[i] * start_.GetDim(i - 1);
desc_.elems[ndims - 1] = 1;
for (int i = ndims - 2; i >= 0; i--)
desc_.elems[i] = desc_.elems[i + 1] * size_.GetDim(i);
count_ = desc_.elems[0] * x[0]->GetBatchSize();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (count_ + threadsPerBlock - 1) / threadsPerBlock;
SliceForwardKernel<<<blocksPerGrid, threadsPerBlock>>>(count_, base_index_, ndims,
desc_, x[0]->GetData(), y->GetData());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
int ndims = x[0]->GetShape().GetRank() + 1;
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (count_ + threadsPerBlock - 1) / threadsPerBlock;
SliceBackwardKernel<<<blocksPerGrid, threadsPerBlock>>>(count_, base_index_, ndims,
desc_, dEdY->GetData(), dEdX[0]->GetData());
}
private:
Shape start_, size_;
mutable SliceDesc desc_;
mutable int count_, base_index_;
};
template<typename Dummy>
struct SliceNodeFactory<Dummy, GPU> {
Node *Create(int node, const Shape &start, const Shape &size) {
return new SliceNodeGPU(node, start, size);
}
};
template struct SliceNodeFactory<void, GPU>;
static __global__ void ConcatForwardKernel(int N, float **axis_bases,
int *higher_strides, float *y, int higher_stride, int axis_stride) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
int h = i / higher_stride;
int j = i % higher_stride;
int t = j / axis_stride;
int l = j % axis_stride;
y[i] = axis_bases[t][h * higher_strides[t] + l];
}
}
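// Index decomposition example (hypothetical concat of widths 2 and 3 along the
// last axis, so axis_stride = 1, axis_total = 5, higher_stride = 5): output
// element i = 7 gives h = 1, t = 2, l = 0 and is read from axis_bases[2],
// i.e. the first column of the second input, second outer row.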
static __global__ void ConcatBackwardKernel(int N, float **axis_bases,
int *higher_strides, float *dEdY, int higher_stride, int axis_stride) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
int h = i / higher_stride;
int j = i % higher_stride;
int t = j / axis_stride;
int l = j % axis_stride;
axis_bases[t][h * higher_strides[t] + l] += dEdY[i];
}
}
class ConcatNodeGPU : public Node {
public:
ConcatNodeGPU(initializer_view<Expression> values, int axis) : Node(values), axis_(axis) {}
virtual void FreeMemory(Device *device) override {
if (axis_bases_)
device->FreeMemoryPinned(axis_bases_);
if (axis_bases_backward_)
device->FreeMemoryPinned(axis_bases_backward_);
if (higher_strides_)
device->FreeMemoryPinned(higher_strides_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Shape &y_shape = y->GetShape();
int N = y_shape.GetSize();
int axis_stride = y_shape.GetSizeRange(axis_ + 1, y_shape.GetRank());
int axis_total = y_shape.GetDim(axis_);
int higher_stride = axis_stride * axis_total;
int higher_size = y_shape.GetSizeRange(0, axis_);
axis_bases_ = (float**)graph->GetDevice()->AllocateMemoryPinned(sizeof(float*) * axis_total);
higher_strides_ = (int*)graph->GetDevice()->AllocateMemoryPinned(sizeof(int) * axis_total);
int k = 0;
for (size_t i = 0; i < x.size(); i++) {
int cur_axis_total = x[i]->GetShape().GetDim(axis_);
float *cur_data_base = x[i]->GetData();
for (int j = 0; j < cur_axis_total; j++) {
axis_bases_[k] = cur_data_base;
higher_strides_[k] = cur_axis_total * axis_stride;
cur_data_base += axis_stride;
k++;
}
}
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
ConcatForwardKernel<<<blocksPerGrid, threadsPerBlock>>>(N, axis_bases_, higher_strides_,
y->GetData(), higher_stride, axis_stride);
CUDA_CALL(cudaDeviceSynchronize());
CUDA_CALL(cudaGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const Shape &y_shape = y->GetShape();
int N = y_shape.GetSize();
int axis_stride = y_shape.GetSizeRange(axis_ + 1, y_shape.GetRank());
int axis_total = y_shape.GetDim(axis_);
int higher_stride = axis_stride * axis_total;
int higher_size = y_shape.GetSizeRange(0, axis_);
axis_bases_backward_ = (float**)graph->GetDevice()->AllocateMemoryPinned(sizeof(float*) * axis_total);
int k = 0;
for (size_t i = 0; i < x.size(); i++) {
int cur_axis_total = x[i]->GetShape().GetDim(axis_);
float *cur_data_base = dEdX[i]->GetData();
for (int j = 0; j < cur_axis_total; j++) {
axis_bases_backward_[k] = cur_data_base;
cur_data_base += axis_stride;
k++;
}
}
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
ConcatBackwardKernel<<<blocksPerGrid, threadsPerBlock>>>(N, axis_bases_backward_, higher_strides_,
dEdY->GetData(), higher_stride, axis_stride);
CUDA_CALL(cudaGetLastError());
}
private:
int axis_;
mutable float **axis_bases_ = nullptr, **axis_bases_backward_ = nullptr;
mutable int *higher_strides_ = nullptr;
};
template<typename Dummy>
struct ConcatNodeFactory<Dummy, GPU> {
Node *Create(initializer_view<Expression> values, int axis) {
return new ConcatNodeGPU(values, axis);
}
};
template struct ConcatNodeFactory<void, GPU>;
static __global__ void DropoutForwardKernel(int n, float p, float mul_scale,
const float *probs, const float *x, float *y) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
if (probs[i] <= p)
y[i] = 0.f;
else
y[i] = x[i] * mul_scale;
}
}
static __global__ void DropoutBackwardKernel(int n, float p, float mul_scale,
const float *probs, const float *dEdY, float *dEdX) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
if (probs[i] > p)
dEdX[i] += dEdY[i] * mul_scale;
}
}
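// The 1/(1-p) factor implements inverted dropout: E[y_i] = (1-p) * x_i/(1-p) = x_i,
// so no rescaling is needed at inference time, and since dy_i/dx_i = 1/(1-p)
// for surviving units the backward pass must apply the same factor.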
class DropoutNodeGPU : public Node {
public:
DropoutNodeGPU(int node, float p) : Node{ node }, p_(p) {}
virtual void FreeMemory(Device *device) {
device->FreeMemory(probs_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int size = x[0]->GetBatchSize() * x[0]->GetShape().GetSize();
probs_ = (float *)graph->GetDevice()->AllocateMemory(size * sizeof(float));
curandGenerator_t generator = graph->GetDevice()->GetCuRANDGenerator();
CURAND_CALL(curandGenerateUniform(generator, probs_, size));
float scale = 1.f / (1.f - p_);
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
DropoutForwardKernel<<<blocksPerGrid, threadsPerBlock>>>(size, p_, scale, probs_, x_data, y_data);
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
int size = x[0]->GetBatchSize() * x[0]->GetShape().GetSize();
		float scale = 1.f / (1.f - p_); // gradient uses the same inverted-dropout scale as the forward pass
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
DropoutBackwardKernel<<<blocksPerGrid, threadsPerBlock>>>(size, p_, scale, probs_, dEdY_data, dEdX_data);
}
private:
float p_;
mutable float *probs_;
};
template<typename Dummy>
struct DropoutNodeFactory<Dummy, GPU> {
Node *Create(int node, float p) {
return new DropoutNodeGPU(node, p);
}
};
template struct DropoutNodeFactory<void, GPU>;
class SoftmaxNodeGPU : public Node {
public:
SoftmaxNodeGPU(int node) : Node{ node } {}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
		// y_i = exp(x_i) / sum_j(exp(x_j))
const Shape &input_shape = x[0]->GetShape();
int size = input_shape.GetSizeRange(0, input_shape.GetRank() - 1);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
float alpha = 1.f, beta = 0.f;
cudnnHandle_t cudnn_handle = graph->GetDevice()->GetCuDNNHandle();
cudnnTensorDescriptor_t tensor_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&tensor_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(tensor_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, size, dim_size, 1, 1));
CUDNN_CALL(cudnnSoftmaxForward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, tensor_desc, x_data, &beta, tensor_desc, y_data));
CUDNN_CALL(cudnnDestroyTensorDescriptor(tensor_desc));
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
		// dE/dX_i = y_i*dEdY_i - y_i*sum_j{y_j*dEdY_j}
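		// (follows from y_i = exp(x_i)/sum_j exp(x_j): dy_i/dx_j = y_i*(delta_ij - y_j),
		// hence dE/dX_i = sum_j dEdY_j * dy_j/dx_i = y_i*(dEdY_i - sum_j y_j*dEdY_j))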
const Shape &input_shape = x[0]->GetShape();
int size = x[0]->GetShape().GetSizeRange(0, input_shape.GetRank() - 1);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *y_data = y->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
float alpha = 1.f, beta = 1.f;
cudnnHandle_t cudnn_handle = graph->GetDevice()->GetCuDNNHandle();
cudnnTensorDescriptor_t tensor_desc;
CUDNN_CALL(cudnnCreateTensorDescriptor(&tensor_desc));
CUDNN_CALL(cudnnSetTensor4dDescriptor(tensor_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, size, dim_size, 1, 1));
CUDNN_CALL(cudnnSoftmaxBackward(cudnn_handle, CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_CHANNEL,
&alpha, tensor_desc, y_data, tensor_desc, dEdY_data, &beta, tensor_desc, dEdX_data));
CUDNN_CALL(cudnnDestroyTensorDescriptor(tensor_desc));
}
};
template<typename Dummy>
struct SoftmaxNodeFactory<Dummy, GPU> {
Node *Create(int node) {
return new SoftmaxNodeGPU(node);
}
};
template struct SoftmaxNodeFactory<void, GPU>;
static __global__ void CrossEntropyForward(const float *x, float *y, const int *labels, int N, int dim_size) {
// y = -log(x_k)
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
int label = labels[i];
y[i] = -log(x[dim_size * i + label]);
}
}
static __global__ void CrossEntropyBackward(const float *x, const int *labels,
const float *dEdY, float *dEdX, int N, int dim_size) {
// dY/dX_k = -1/X_k
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N) {
int label = labels[i];
dEdX[dim_size * i + label] -= dEdY[i] * (1.f / x[dim_size * i + label]);
}
}
class CrossEntropyNodeGPU : public Node {
public:
CrossEntropyNodeGPU(Graph *graph, int node, const std::vector<int> &labels) : Node{ node } {
int size = (int)labels.size() * sizeof(int);
labels_pinned_ = (int*)graph->GetDevice()->AllocateMemoryPinned(size);
memcpy(labels_pinned_, labels.data(), size);
labels_data_ = (int *)graph->GetDevice()->AllocateMemory(size);
CUDA_CALL(cudaMemcpyAsync(labels_data_, labels_pinned_, size, cudaMemcpyHostToDevice));
}
virtual void FreeMemory(Device *device) override {
device->FreeMemoryPinned(labels_pinned_);
device->FreeMemory(labels_data_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Shape &input_shape = x[0]->GetShape();
int size = input_shape.GetSizeRange(0, input_shape.GetRank() - 2);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
CrossEntropyForward<<<blocksPerGrid, threadsPerBlock>>>(x_data, y_data, labels_data_, size, dim_size);
CUDA_CALL(cudaGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
const Shape &input_shape = x[0]->GetShape();
int size = input_shape.GetSizeRange(0, input_shape.GetRank() - 2);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *x_data = x[0]->GetData();
const float *dEdY_data = dEdY->GetData();
float *dEdX_data = dEdX[0]->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
CrossEntropyBackward<<<blocksPerGrid, threadsPerBlock>>>(
x_data, labels_data_, dEdY_data, dEdX_data, size, dim_size);
CUDA_CALL(cudaGetLastError());
}
private:
int *labels_pinned_, *labels_data_;
};
template<typename Dummy>
struct CrossEntropyNodeFactory<Dummy, GPU> {
Node *Create(Graph *graph, int node, const std::vector<int> &labels) {
return new CrossEntropyNodeGPU(graph, node, labels);
}
};
template struct CrossEntropyNodeFactory<void, GPU>;
static __global__ void ClassificationAccuracyKernel(const float *input, const int *expected, float *output,
int batch_size, int size) {
int batch_id = blockDim.x * blockIdx.x + threadIdx.x;
if (batch_id < batch_size) {
int max_index = 0;
float max_value = input[batch_id * size];
for (int i = 1; i < size; i++) {
float current = input[batch_id * size + i];
if (current > max_value) {
max_value = current;
max_index = i;
}
}
if (max_index == expected[batch_id])
output[batch_id] = 1.f;
else
output[batch_id] = 0.f;
}
}
class ClassificationAccuracyNodeGPU : public Node {
public:
ClassificationAccuracyNodeGPU(Graph *graph, int node, const std::vector<int> &labels) : Node{ node } {
int size = (int)labels.size() * sizeof(int);
		// The labels stay in pinned host memory and are read directly by the kernel (zero-copy), since we only need them once
labels_pinned_ = (int*)graph->GetDevice()->AllocateMemoryPinned(size);
memcpy(labels_pinned_, labels.data(), size);
}
virtual void FreeMemory(Device *device) override {
device->FreeMemoryPinned(labels_pinned_);
}
virtual void Forward(Graph *graph, const std::vector<const Tensor *> &x, Tensor *y) const override {
const Shape &input_shape = x[0]->GetShape();
int size = input_shape.GetSizeRange(0, input_shape.GetRank() - 2);
size *= x[0]->GetBatchSize();
int dim_size = input_shape.GetDim(input_shape.GetRank() - 1);
const float *x_data = x[0]->GetData();
float *y_data = y->GetData();
int threadsPerBlock = kThreadsPerBlock;
int blocksPerGrid = (size + threadsPerBlock - 1) / threadsPerBlock;
ClassificationAccuracyKernel<<<blocksPerGrid, threadsPerBlock>>>(
x_data, labels_pinned_, y_data, size, dim_size);
CUDA_CALL(cudaGetLastError());
}
virtual void Backward(Graph *graph, const std::vector<const Tensor *> &x, const Tensor *y,
const Tensor *dEdY, const std::vector<Tensor *> &dEdX) const override {
REPORT_ERROR("Backward propagation is unsupported for this expression.");
}
private:
int *labels_pinned_;
};
template<typename Dummy>
struct ClassificationAccuracyNodeFactory<Dummy, GPU> {
Node *Create(Graph *graph, int node, const std::vector<int> &labels) {
return new ClassificationAccuracyNodeGPU(graph, node, labels);
}
};
template struct ClassificationAccuracyNodeFactory<void, GPU>;
|
16d2761962b256bf5c1934645a39149cc3a9c8e3.hip | // !!! This is a file automatically generated by hipify!!!
//#include <hip/hip_runtime.h>
//#include <stdio.h>
//#include <stdlib.h>
//#include <errno.h>
//#include <math.h>
#include "rte.h"
//#include <pthread.h>
//#include <cutil.h>
//#include <hip/hip_runtime.h>
//#include <cutil_inline.h>
extern Geometry *geom;
extern Phantom *phan;
extern Source *beam_src;
extern complex_double *diag_terms_host;
extern complex_double *sph_harm;
extern Info_Stat *info_stat_host;
extern SHORT nL;
extern int nTerms;
void Neumann(complex_double* src_host, complex_double *out_host, int flag) // if flag == 1, run the entire Neumann series, else just the final absorption term
{
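	// Each pass of the loop below computes one term of the Neumann series:
	// W = K(src) is evaluated by prop_abs across the devices (scattering
	// enabled), the term is accumulated into out, and W becomes the source
	// for the next term, so out effectively approximates sum_n K^n src for
	// the transport operator K implemented by prop_abs.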
unsigned int timer;
timer = 0;
cutilCheckError(cutCreateTimer(&timer));
int i, j,k,r_ind,cnt;
int n;
int thread_index, tid;
int num_layers_gpu[NUM_DEVICES];
pthread_t thread_data[NUM_DEVICES];
THREAD_PARAMETERS thread_parameters[NUM_DEVICES];
int size_layer = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * ( nL+1) * (nL+1);
int size = size_layer * (geom->nZ + 2*geom->bounZ);
int num_layers_per_gpu = (int) floorf(geom->nZ / ( NUM_DEVICES));
int rem = geom->nZ % (NUM_DEVICES);
for (thread_index = 0; thread_index < NUM_DEVICES; thread_index++){
num_layers_gpu[thread_index] = num_layers_per_gpu;
if(rem > thread_index){
num_layers_gpu[thread_index] += 1;
}
}
complex_double *W_out_host;
W_out_host = (complex_double *) malloc ( sizeof(complex_double)*size);
n =2;
double fi=1.0;
if(flag == 1){
while(!StopIteration(&fi,n,src_host,out_host)){
n++;
// for (n=0;n<nTerms-2;n++) {
// printf("%d term of Neumann series \n",n);
memset(W_out_host, 0, sizeof(complex_double)*size);
#if 0
printf("Before the prop_abs function \n");
for (i=geom->bounZ;i<geom->nZ + geom->bounZ;i++) {
for (j=geom->bounY;j<geom->nY + geom->bounY;j++) {
for (k=geom->bounX;k<geom->nX + geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 && k == geom->nX/2 && cnt==0)|| src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0 || W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
for(thread_index = 0; thread_index < NUM_DEVICES; ++thread_index) {
thread_parameters[thread_index].device_index = thread_index + MY_START_DEVICE;
thread_parameters[thread_index].src_host = src_host;
thread_parameters[thread_index].num_layers = num_layers_gpu[thread_index];
thread_parameters[thread_index].layer_start = 0 ;
for(tid = 0; tid < thread_index; tid++){
thread_parameters[thread_index].layer_start += num_layers_gpu[tid];
}
thread_parameters[thread_index].out_host = W_out_host + ( thread_parameters[thread_index].layer_start + geom->bounZ) * size_layer ;
if(phan->g != 0.0){
// printf("Medium is anisotropic \n");
thread_parameters[thread_index].flag = 1;
}
else
thread_parameters[thread_index].flag = 0 ;
thread_parameters[thread_index].flag_scat = 1 ;
pthread_create(& thread_data[thread_index], NULL, prop_abs, &thread_parameters[thread_index]);
}
for(thread_index = 0; thread_index < NUM_DEVICES; thread_index++) {
pthread_join(thread_data[thread_index], NULL);
}
#if 0
printf("Before the prop_scat function \n");
for (i=geom->bounZ;i<geom->nZ + geom->bounZ;i++) {
for (j=geom->bounY;j<geom->nY + geom->bounY;j++) {
for (k=geom->bounX;k<geom->nX + geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 -1 && k == geom->nX/2 && cnt==0)){//|| fabs(src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].imag()) > 0 || fabs(W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].imag()) > 0.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].imag(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
// prop_scat(W_out_host, src_host);
// copy_dist(W_out_host,src_host);
#if 0
printf("After the prop_scat function \n");
for (i=geom->bounZ;i<geom->nZ + geom->bounZ;i++) {
for (j=geom->bounY;j<geom->nY + geom->bounY;j++) {
for (k=geom->bounX;k<geom->nX + geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 && k == geom->nX/2 && cnt==0)|| src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0 || W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
//add_dist( src_host, out_host, out_host);
add_dist( W_out_host, out_host, out_host);
copy_dist(W_out_host,src_host);
#if 0
printf("\n \n \n \n \n \n \n After the add_dist function \n");
for (i=0;i<geom->nZ + 2*geom->bounZ;i++) {
for (j=0;j<geom->nY + 2*geom->bounY;j++) {
for (k=0;k<geom->nX + 2*geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 && k == geom->nX/2 && cnt==0) || out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
//n++;
//printf("Neumann series for %d terms complete \n", n);
}
#if 0
printf("\n \n \n \n \n \n \n Before the final prop_abs function \n");
for (i=0;i<geom->nZ + 2*geom->bounZ;i++) {
for (j=0;j<geom->nY + 2*geom->bounY;j++) {
for (k=0;k<geom->nX + 2*geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 && k == geom->nX/2 && cnt==0) || out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
}
//printf("Calling the absorption kernel for the last time\n");
memset(W_out_host, 0, sizeof(complex_double)*size);
cutilCheckError(cutResetTimer(timer));
cutilCheckError(cutStartTimer(timer));
for(thread_index = 0; thread_index < NUM_DEVICES; ++thread_index) {
thread_parameters[thread_index].device_index = thread_index + MY_START_DEVICE;
thread_parameters[thread_index].src_host = out_host;
thread_parameters[thread_index].num_layers = num_layers_gpu[thread_index];
thread_parameters[thread_index].layer_start = 0 ;
for(tid = 0; tid < thread_index; tid++){
thread_parameters[thread_index].layer_start += num_layers_gpu[tid];
}
thread_parameters[thread_index].out_host = W_out_host + (thread_index*num_layers_per_gpu + geom->bounZ) * size_layer ;
thread_parameters[thread_index].flag = 1 ;
thread_parameters[thread_index].flag_scat = 0 ;
pthread_create(& thread_data[thread_index], NULL, prop_abs, &thread_parameters[thread_index]);
}
for(thread_index = 0; thread_index < NUM_DEVICES; ++thread_index) {
pthread_join(thread_data[thread_index], NULL);
}
#if 0
printf("\n \n \n \n \n \n \n After the final prop_abs function \n");
for (i=0;i<geom->nZ + 2*geom->bounZ;i++) {
for (j=0;j<geom->nY + 2*geom->bounY;j++) {
for (k=0;k<geom->nX + 2*geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if(j==geom->nY/2 && k == geom->nX/2 && cnt==0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
// printf("Time taken for the absorption kernel :%f ms \n", cutGetTimerValue(timer));
copy_dist(W_out_host, out_host);
}
| 16d2761962b256bf5c1934645a39149cc3a9c8e3.cu | //#include <cuda.h>
//#include <stdio.h>
//#include <stdlib.h>
//#include <errno.h>
//#include <math.h>
#include "rte.h"
//#include <pthread.h>
//#include <cutil.h>
//#include <cuda_runtime.h>
//#include <cutil_inline.h>
extern Geometry *geom;
extern Phantom *phan;
extern Source *beam_src;
extern complex_double *diag_terms_host;
extern complex_double *sph_harm;
extern Info_Stat *info_stat_host;
extern SHORT nL;
extern int nTerms;
void Neumann(complex_double* src_host, complex_double *out_host, int flag) // if flag = 1 ,run the entire Neumann series, else just the final absorption term
{
unsigned int timer;
timer = 0;
cutilCheckError(cutCreateTimer(&timer));
int i, j,k,r_ind,cnt;
int n;
int thread_index, tid;
int num_layers_gpu[NUM_DEVICES];
pthread_t thread_data[NUM_DEVICES];
THREAD_PARAMETERS thread_parameters[NUM_DEVICES];
int size_layer = ( geom->nX + 2*geom->bounX ) * ( geom->nY + 2*geom->bounY ) * ( nL+1) * (nL+1);
int size = size_layer * (geom->nZ + 2*geom->bounZ);
int num_layers_per_gpu = (int) floorf(geom->nZ / ( NUM_DEVICES));
int rem = geom->nZ % (NUM_DEVICES);
for (thread_index = 0; thread_index < NUM_DEVICES; thread_index++){
num_layers_gpu[thread_index] = num_layers_per_gpu;
if(rem > thread_index){
num_layers_gpu[thread_index] += 1;
}
}
complex_double *W_out_host;
W_out_host = (complex_double *) malloc ( sizeof(complex_double)*size);
n =2;
double fi=1.0;
if(flag == 1){
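// Each Neumann term: propagate the current source through one scattering +
// absorption step (prop_abs with flag_scat=1), add the result into the
// running sum, then feed it back in as the next source until
// StopIteration() reports convergence.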
while(!StopIteration(&fi,n,src_host,out_host)){
n++;
// for (n=0;n<nTerms-2;n++) {
// printf("%d term of Neumann series \n",n);
memset(W_out_host, 0, sizeof(complex_double)*size);
#if 0
printf("Before the prop_abs function \n");
for (i=geom->bounZ;i<geom->nZ + geom->bounZ;i++) {
for (j=geom->bounY;j<geom->nY + geom->bounY;j++) {
for (k=geom->bounX;k<geom->nX + geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 && k == geom->nX/2 && cnt==0)|| src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0 || W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
for(thread_index = 0; thread_index < NUM_DEVICES; ++thread_index) {
thread_parameters[thread_index].device_index = thread_index + MY_START_DEVICE;
thread_parameters[thread_index].src_host = src_host;
thread_parameters[thread_index].num_layers = num_layers_gpu[thread_index];
thread_parameters[thread_index].layer_start = 0 ;
for(tid = 0; tid < thread_index; tid++){
thread_parameters[thread_index].layer_start += num_layers_gpu[tid];
}
thread_parameters[thread_index].out_host = W_out_host + ( thread_parameters[thread_index].layer_start + geom->bounZ) * size_layer ;
if(phan->g != 0.0){
// printf("Medium is anisotropic \n");
thread_parameters[thread_index].flag = 1;
}
else
thread_parameters[thread_index].flag = 0 ;
thread_parameters[thread_index].flag_scat = 1 ;
pthread_create(& thread_data[thread_index], NULL, prop_abs, &thread_parameters[thread_index]);
}
for(thread_index = 0; thread_index < NUM_DEVICES; thread_index++) {
pthread_join(thread_data[thread_index], NULL);
}
#if 0
printf("Before the prop_scat function \n");
for (i=geom->bounZ;i<geom->nZ + geom->bounZ;i++) {
for (j=geom->bounY;j<geom->nY + geom->bounY;j++) {
for (k=geom->bounX;k<geom->nX + geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 -1 && k == geom->nX/2 && cnt==0)){//|| fabs(src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].imag()) > 0 || fabs(W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].imag()) > 0.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].imag(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
// prop_scat(W_out_host, src_host);
// copy_dist(W_out_host,src_host);
#if 0
printf("After the prop_scat function \n");
for (i=geom->bounZ;i<geom->nZ + geom->bounZ;i++) {
for (j=geom->bounY;j<geom->nY + geom->bounY;j++) {
for (k=geom->bounX;k<geom->nX + geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 && k == geom->nX/2 && cnt==0)|| src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0 || W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
//add_dist( src_host, out_host, out_host);
add_dist( W_out_host, out_host, out_host);
copy_dist(W_out_host,src_host);
#if 0
printf("\n \n \n \n \n \n \n After the add_dist function \n");
for (i=0;i<geom->nZ + 2*geom->bounZ;i++) {
for (j=0;j<geom->nY + 2*geom->bounY;j++) {
for (k=0;k<geom->nX + 2*geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 && k == geom->nX/2 && cnt==0) || out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
//n++;
//printf("Neumann series for %d terms complete \n", n);
}
#if 0
printf("\n \n \n \n \n \n \n Before the final prop_abs function \n");
for (i=0;i<geom->nZ + 2*geom->bounZ;i++) {
for (j=0;j<geom->nY + 2*geom->bounY;j++) {
for (k=0;k<geom->nX + 2*geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if((j==geom->nY/2 && k == geom->nX/2 && cnt==0) || out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real() > 1000.0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
}
//printf("Calling the absorption kernel for the last time\n");
memset(W_out_host, 0, sizeof(complex_double)*size);
cutilCheckError(cutResetTimer(timer));
cutilCheckError(cutStartTimer(timer));
for(thread_index = 0; thread_index < NUM_DEVICES; ++thread_index) {
thread_parameters[thread_index].device_index = thread_index + MY_START_DEVICE;
thread_parameters[thread_index].src_host = out_host;
thread_parameters[thread_index].num_layers = num_layers_gpu[thread_index];
thread_parameters[thread_index].layer_start = 0 ;
for(tid = 0; tid < thread_index; tid++){
thread_parameters[thread_index].layer_start += num_layers_gpu[tid];
}
thread_parameters[thread_index].out_host = W_out_host + (thread_index*num_layers_per_gpu + geom->bounZ) * size_layer ;
thread_parameters[thread_index].flag = 1 ;
thread_parameters[thread_index].flag_scat = 0 ;
pthread_create(& thread_data[thread_index], NULL, prop_abs, &thread_parameters[thread_index]);
}
for(thread_index = 0; thread_index < NUM_DEVICES; ++thread_index) {
pthread_join(thread_data[thread_index], NULL);
}
#if 0
printf("\n \n \n \n \n \n \n After the final prop_abs function \n");
for (i=0;i<geom->nZ + 2*geom->bounZ;i++) {
for (j=0;j<geom->nY + 2*geom->bounY;j++) {
for (k=0;k<geom->nX + 2*geom->bounX;k++) {
for (cnt=0;cnt< (nL+1)*(nL+1) ;cnt = cnt +2) {
r_ind = i* (geom->nX + 2*geom->bounX )* (geom->nY + 2*geom->bounY) + j* (geom->nX + 2*geom->bounX) + k;
// if((out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real() != (out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))]).real()){
if(j==geom->nY/2 && k == geom->nX/2 && cnt==0){
printf("%e % e %e , %d (%d %d %d) %d \n", out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), W_out_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(), src_host[VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1))].real(),cnt, i-geom->bounX,j-geom->bounY,k-geom->bounZ, VOX_TO_SPIND(r_ind,cnt,(nL+1)*(nL+1)));
}}}}}
#endif
// printf("Time taken for the absorption kernel :%f ms \n", cutGetTimerValue(timer));
copy_dist(W_out_host, out_host);
}
|
b3cc2900f7bbe4464c9fb2138a6d654428677a1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "wtoolkit_cuda.h"
#include <algorithm>
#include <iostream>
#include <iomanip>
#include "bboxes.cu.h"
#include "wmacros.h"
using namespace std;
constexpr auto kBlockSize = 64;
#ifdef GOOGLE_CUDA
/*
 * For each anchor box, find the gbbox with the largest overlap.
 *
 * gbboxes:[gb_size,4] (ymin,xmin,ymax,xmax) ground truth boxes
 * anchor_bboxes:[ab_size,4] (ymin,xmin,ymax,xmax) boxes to be matched
 * Outputs:
 * scores:[ab_size] the best IoU score for each anchor
 * indexs:[ab_size] index of the matching gbbox for each anchor
 * is_boundary_box: whether each anchor box crosses the image boundary
*/
__global__ void get_scores_and_indexs(const float* gbboxes,const float* anchor_bboxes,float* scores,int* indexs,bool* is_boundary_box,size_t gb_size,size_t ab_size)
{
const auto a_index = blockIdx.x;
const auto g_offset = threadIdx.x;
auto max_i = -1;
auto max_s = 1e-8;
float abbox[4];
float gbbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * gbboxes are divided into groups of kBlockSize; the code below finds, within
 * this thread's group, the best-matching ground truth box (max_i, max_s) for
 * the given anchor box (a_index)
*/
for(auto i=0; i<4; ++i)
abbox[i] = (anchor_bboxes+(a_index<<2))[i];
#ifdef PROCESS_BOUNDARY_ANCHORS
if(cuda_is_cross_boundaries(abbox)) {
is_boundary_box[a_index] = true;
return;
}
#endif
for(auto i=g_offset; i<gb_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
gbbox[j] = (gbboxes+(i<<2))[j];
const auto cs = cuda_bboxes_jaccard(abbox,gbbox);
//const auto cs = cuda_bboxes_jaccard(abbox,gbboxes+(i<<2));
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[g_offset] = max_i;
max_scores[g_offset] = max_s;
__syncthreads();
if(g_offset != 0) return;
/*
 * Thread 0 then finds the maximum over all groups
*/
max_i = -1;
max_s = 1e-8;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0) {
indexs[a_index] = max_index[max_i];
scores[a_index] = max_s;
}
}
/*
 * For each ground truth box, find the anchor box with the largest overlap
 * gbboxes:[gb_size,4]
 * anchor_bboxes: [ab_size,4]
 * is_boundary_box:[ab_size]
 * Outputs:
* is_max_score:[ab_size]
* scores0:[gb_size]
* indexs0:[gb_size]
*/
__global__ void find_max_score_index(const float* gbboxes,const float* anchor_bboxes,const bool* is_boundary_box,bool* is_max_score,size_t ab_size)
{
const auto g_index = blockIdx.x;
const auto a_offset = threadIdx.x;
auto max_i = -1;
auto max_s = MIN_SCORE_FOR_POS_BOX;
float gbbox[4];
float abbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * anchor bboxes are grouped by kBlockSize; this part finds, within one group,
 * the best-matching anchor box for the given gbbox (g_index)
*/
for(auto i=0; i<4; ++i)
gbbox[i] = (gbboxes+(g_index<<2))[i];
for(auto i=a_offset; i<ab_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
abbox[j] = (anchor_bboxes+(i<<2))[j];
if(is_boundary_box[i]) continue;
const auto cs = cuda_bboxes_jaccard(gbbox,abbox);
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[a_offset] = max_i;
max_scores[a_offset] = max_s;
__syncthreads();
if(a_offset != 0) return;
/*
 * Thread 0 then selects the unique best-matching anchor box index
*/
max_i = -1;
max_s = MIN_SCORE_FOR_POS_BOX;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0)
is_max_score[max_index[max_i]] = true;
}
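/*
 * Assign a class label to every anchor: anchors whose score reaches
 * pos_threshold (or that were flagged as the best match of some ground
 * truth box) take the label of their matched gbbox; anchors below
 * neg_threshold become background (label 0); anchors in between are
 * flagged in remove_indices so they are ignored.
 */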
__global__ void get_labels_and_remove_indices(int* indexs,float* scores,const bool* is_max_score,const int* glabels,int* out_labels,bool* remove_indices,float neg_threshold,float pos_threshold)
{
auto a_index = blockIdx.x;
const auto &index = indexs[a_index];
const auto score = scores[a_index];
if((score>=pos_threshold) || (score<neg_threshold) || is_max_score[a_index]) {
remove_indices[a_index] = false;
if((score>=pos_threshold) || is_max_score[a_index]) {
out_labels[a_index] = glabels[index];
} else {
out_labels[a_index] = 0;
indexs[a_index] = -1;
scores[a_index] = 0;
}
} else {
remove_indices[a_index] = true;
indexs[a_index] = -1;
scores[a_index] = 0.0f;
}
}
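/*
 * Compute box-regression targets for every positive anchor: (dy, dx) are
 * centre offsets normalised by the anchor height/width, (dh, dw) are log
 * size ratios, each divided by its prio_scaling entry. Negative or removed
 * anchors keep their zero-initialised targets.
 */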
__global__ void get_bboxes_regression(float* out_boxes,const float* anchor_bboxes,const float* gbboxes,const int* out_labels,const bool* out_remove_indices,const int* out_index,float* prio_scaling)
{
auto j = blockIdx.x; //a_index
auto outbox = out_boxes+j*4;
if((out_labels[j]<1) || (out_remove_indices[j])) {
return;
}
auto box = anchor_bboxes+j *4;
auto gbox = gbboxes+out_index[j] *4;
auto yxhw = cuda_box_minmax_to_cxywh(box);
auto yref = std::get<0>(yxhw);
auto xref = std::get<1>(yxhw);
auto href = std::get<2>(yxhw);
auto wref = std::get<3>(yxhw);
if((href<1E-8) || (wref<1E-8)) {
return;
}
auto gyxhw = cuda_box_minmax_to_cxywh(gbox);
auto feat_cy = std::get<0>(gyxhw);
auto feat_cx = std::get<1>(gyxhw);
auto feat_h = std::get<2>(gyxhw);
auto feat_w = std::get<3>(gyxhw);
outbox[0] = (feat_cy-yref)/(href*prio_scaling[0]);
outbox[1] = (feat_cx-xref)/(wref*prio_scaling[1]);
outbox[2] = log(feat_h/href)/prio_scaling[2];
outbox[3] = log(feat_w/wref)/prio_scaling[3];
}
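/*
 * Inverse of the encoding above: apply the (clamped) regression offsets to
 * each anchor box, convert back to (ymin,xmin,ymax,xmax), clip to [0,1]
 * and force ymax>=ymin, xmax>=xmin.
 */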
__global__ void bboxes_decode_kernel(const float* anchor_bboxes,const float* regs,const float* prio_scaling,float* out_bboxes,size_t data_nr)
{
const auto b = threadIdx.x+blockIdx.x *blockDim.x;
if(b>=data_nr) return;
const auto base_offset = b *4;
const auto regs_data = regs+base_offset;
const auto box_data = anchor_bboxes+base_offset;
float y;
float x;
float href;
float wref;
auto xywh = cuda_box_minmax_to_cxywh(box_data);
y = std::get<0>(xywh);
x = std::get<1>(xywh);
href = std::get<2>(xywh);
wref = std::get<3>(xywh);
auto cy = clamp<float>(regs_data[0] *prio_scaling[0],-10.0f,10.0f) *href+y;
auto cx = clamp<float>(regs_data[1] *prio_scaling[1],-10.0f,10.0f) *wref+x;
auto h = href *exp(clamp<float>(regs_data[2] *prio_scaling[2],-10.0,10.0));
auto w = wref *exp(clamp<float>(regs_data[3] *prio_scaling[3],-10.0,10.0));
auto output_data = out_bboxes + base_offset;
const auto minmax = cuda_box_cxywh_to_minmax(cy,cx,h,w);
output_data[0] = clamp<float>(std::get<0>(minmax),0.0,1.0);
output_data[1] = clamp<float>(std::get<1>(minmax),0.0,1.0);
output_data[2] = clamp<float>(std::get<2>(minmax),0.0,1.0);
output_data[3] = clamp<float>(std::get<3>(minmax),0.0,1.0);
if(output_data[0]>output_data[2])
output_data[2] = output_data[0];
if(output_data[1]>output_data[3])
output_data[3] = output_data[1];
}
__host__ void get_encodes(const float* gbboxes,const float* anchor_bboxes,const int* glabels,
float* out_boxes,float* out_scores,int* out_labels,bool* out_remove_indices,int* out_index,const float* prio_scaling,
size_t gb_size,size_t ab_size,float neg_threshold,float pos_threshold,bool max_overlap_as_pos=true)
{
cuda_unique_ptr<int> g_out_index;
if(nullptr == out_index) {
g_out_index = make_cuda_unique<int>(ab_size);
out_index = g_out_index.get();
}
CHECK_OK(hipMemset(out_boxes,0,sizeof(float)*4*ab_size));
CHECK_OK(hipMemset(out_scores,0,sizeof(float)*ab_size));
CHECK_OK(hipMemset(out_index,0xff,sizeof(int)*ab_size));
CHECK_OK(hipMemset(out_labels,0,sizeof(int)*ab_size));
dim3 grid(ab_size);
dim3 grid1(gb_size);
auto d_is_boundary_box = make_cuda_unique<bool>((unsigned char)(0x00),ab_size);
cuda_unique_ptr<bool> d_is_max_score = make_cuda_unique<bool>((unsigned char)(0x00),ab_size);
hipLaunchKernelGGL(( get_scores_and_indexs), dim3(grid),dim3(std::min<size_t>(kBlockSize,gb_size)), 0, 0, gbboxes,anchor_bboxes,out_scores,out_index,d_is_boundary_box.get(),gb_size,ab_size);
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipDeviceSynchronize();
if(max_overlap_as_pos) {
hipLaunchKernelGGL(( find_max_score_index), dim3(grid1),dim3(std::min<size_t>(kBlockSize,ab_size)), 0, 0, gbboxes,anchor_bboxes,d_is_boundary_box.get(),d_is_max_score.get(),ab_size);
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipDeviceSynchronize();
}
hipDeviceSynchronize();
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipLaunchKernelGGL(( get_labels_and_remove_indices), dim3(grid),dim3(1), 0, 0, out_index,out_scores,d_is_max_score.get(),glabels,out_labels,out_remove_indices,neg_threshold,pos_threshold);
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipDeviceSynchronize();
cuda_unique_ptr<float> d_prio_scaling = make_cuda_unique<float>(prio_scaling,4);
hipLaunchKernelGGL(( get_bboxes_regression), dim3(grid),dim3(1), 0, 0, out_boxes,anchor_bboxes,gbboxes,out_labels,out_remove_indices,out_index,d_prio_scaling.get());
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipDeviceSynchronize();
}
void bboxes_decode_by_gpu(const float* anchor_bboxes,const float* regs,const float* prio_scaling,float* out_bboxes,size_t data_nr)
{
if(0 == data_nr)
return;
cuda_unique_ptr<float> d_prio_scaling = make_cuda_unique<float>(prio_scaling,4);
const auto block_size = std::min<size_t>(data_nr,128);
const auto grid_size = (data_nr+block_size-1)/block_size;
hipLaunchKernelGGL(( bboxes_decode_kernel), dim3(grid_size),dim3(block_size), 0, 0, anchor_bboxes,regs,d_prio_scaling.get(),out_bboxes,data_nr);
CHECK_CUDA_ERRORS(hipPeekAtLastError());
hipDeviceSynchronize();
}
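/*
 * Minimal usage sketch for bboxes_decode_by_gpu (illustrative only; the
 * buffer names d_anchors/d_regs/d_out are assumptions, not part of this
 * file). The box/regression/output pointers must already live in device
 * memory, while prio_scaling is a host array that the function copies to
 * the device itself:
 *
 *   const size_t n = 1024;
 *   const float prio_scaling[4] = {0.1f, 0.1f, 0.2f, 0.2f};
 *   // d_anchors, d_regs, d_out: device buffers of n*4 floats
 *   bboxes_decode_by_gpu(d_anchors, d_regs, prio_scaling, d_out, n);
 */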
__global__ void boxes_pair_jaccard_kernel(const float* bboxes, float* jaccard, int data_nr)
{
const auto index = threadIdx.x+blockIdx.x *blockDim.x;
const auto src_index = index/data_nr;
const auto dst_index = index%data_nr;
if(index>= data_nr*data_nr)
return;
jaccard[index] = cuda_bboxes_jaccard(bboxes+(src_index<<2),bboxes+(dst_index<<2));
}
void boxes_pair_jaccard(const float* _bboxes,float* _jaccard,int data_nr)
{
auto bboxes = make_cuda_unique(_bboxes,data_nr);
auto jaccard = make_cuda_unique<float>(data_nr*data_nr);
boxes_pair_jaccard_gpu_mem(bboxes.get(),jaccard.get(),data_nr);
CHECK_OK(hipMemcpy(_jaccard,jaccard.get(),data_nr*data_nr*sizeof(float),hipMemcpyDeviceToHost));
}
void boxes_pair_jaccard_gpu_mem(const float* bboxes,float* jaccard,int data_nr)
{
const auto grid_size = (data_nr*data_nr+kBlockSize-1)/kBlockSize;
	hipLaunchKernelGGL(( boxes_pair_jaccard_kernel), dim3(grid_size),dim3(kBlockSize), 0, 0, bboxes,jaccard,data_nr); // use the computed grid so all data_nr*data_nr pairs are covered
hipDeviceSynchronize();
}
#endif
| b3cc2900f7bbe4464c9fb2138a6d654428677a1d.cu | #include <vector>
#include "wtoolkit_cuda.h"
#include <algorithm>
#include <iostream>
#include <iomanip>
#include "bboxes.cu.h"
#include "wmacros.h"
using namespace std;
constexpr auto kBlockSize = 64;
#ifdef GOOGLE_CUDA
/*
 * For each anchor box, find the gbbox with the largest overlap.
 *
 * gbboxes:[gb_size,4] (ymin,xmin,ymax,xmax) ground truth boxes
 * anchor_bboxes:[ab_size,4] (ymin,xmin,ymax,xmax) boxes to be matched
 * Outputs:
 * scores:[ab_size] the best IoU score for each anchor
 * indexs:[ab_size] index of the matching gbbox for each anchor
 * is_boundary_box: whether each anchor box crosses the image boundary
*/
__global__ void get_scores_and_indexs(const float* gbboxes,const float* anchor_bboxes,float* scores,int* indexs,bool* is_boundary_box,size_t gb_size,size_t ab_size)
{
const auto a_index = blockIdx.x;
const auto g_offset = threadIdx.x;
auto max_i = -1;
auto max_s = 1e-8;
float abbox[4];
float gbbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * gbboxes are divided into groups of kBlockSize; the code below finds, within
 * this thread's group, the best-matching ground truth box (max_i, max_s) for
 * the given anchor box (a_index)
*/
for(auto i=0; i<4; ++i)
abbox[i] = (anchor_bboxes+(a_index<<2))[i];
#ifdef PROCESS_BOUNDARY_ANCHORS
if(cuda_is_cross_boundaries(abbox)) {
is_boundary_box[a_index] = true;
return;
}
#endif
for(auto i=g_offset; i<gb_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
gbbox[j] = (gbboxes+(i<<2))[j];
const auto cs = cuda_bboxes_jaccard(abbox,gbbox);
//const auto cs = cuda_bboxes_jaccard(abbox,gbboxes+(i<<2));
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[g_offset] = max_i;
max_scores[g_offset] = max_s;
__syncthreads();
if(g_offset != 0) return;
/*
 * Thread 0 then finds the maximum over all groups
*/
max_i = -1;
max_s = 1e-8;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0) {
indexs[a_index] = max_index[max_i];
scores[a_index] = max_s;
}
}
/*
 * For each ground truth box, find the anchor box with the largest overlap
 * gbboxes:[gb_size,4]
 * anchor_bboxes: [ab_size,4]
 * is_boundary_box:[ab_size]
 * Outputs:
* is_max_score:[ab_size]
* scores0:[gb_size]
* indexs0:[gb_size]
*/
__global__ void find_max_score_index(const float* gbboxes,const float* anchor_bboxes,const bool* is_boundary_box,bool* is_max_score,size_t ab_size)
{
const auto g_index = blockIdx.x;
const auto a_offset = threadIdx.x;
auto max_i = -1;
auto max_s = MIN_SCORE_FOR_POS_BOX;
float gbbox[4];
float abbox[4];
__shared__ int max_index[kBlockSize];
__shared__ float max_scores[kBlockSize];
/*
 * anchor bboxes are grouped by kBlockSize; this part finds, within one group,
 * the best-matching anchor box for the given gbbox (g_index)
*/
for(auto i=0; i<4; ++i)
gbbox[i] = (gbboxes+(g_index<<2))[i];
for(auto i=a_offset; i<ab_size; i += blockDim.x) {
for(auto j=0; j<4; ++j)
abbox[j] = (anchor_bboxes+(i<<2))[j];
if(is_boundary_box[i]) continue;
const auto cs = cuda_bboxes_jaccard(gbbox,abbox);
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
max_index[a_offset] = max_i;
max_scores[a_offset] = max_s;
__syncthreads();
if(a_offset != 0) return;
/*
 * Thread 0 then selects the unique best-matching anchor box index
*/
max_i = -1;
max_s = MIN_SCORE_FOR_POS_BOX;
for(auto i=0; i<blockDim.x; ++i) {
const auto cs = max_scores[i];
if(cs>max_s) {
max_i = i;
max_s = cs;
}
}
if(max_i>=0)
is_max_score[max_index[max_i]] = true;
}
__global__ void get_labels_and_remove_indices(int* indexs,float* scores,const bool* is_max_score,const int* glabels,int* out_labels,bool* remove_indices,float neg_threshold,float pos_threshold)
{
auto a_index = blockIdx.x;
const auto &index = indexs[a_index];
const auto score = scores[a_index];
if((score>=pos_threshold) || (score<neg_threshold) || is_max_score[a_index]) {
remove_indices[a_index] = false;
if((score>=pos_threshold) || is_max_score[a_index]) {
out_labels[a_index] = glabels[index];
} else {
out_labels[a_index] = 0;
indexs[a_index] = -1;
scores[a_index] = 0;
}
} else {
remove_indices[a_index] = true;
indexs[a_index] = -1;
scores[a_index] = 0.0f;
}
}
__global__ void get_bboxes_regression(float* out_boxes,const float* anchor_bboxes,const float* gbboxes,const int* out_labels,const bool* out_remove_indices,const int* out_index,float* prio_scaling)
{
auto j = blockIdx.x; //a_index
auto outbox = out_boxes+j*4;
if((out_labels[j]<1) || (out_remove_indices[j])) {
return;
}
auto box = anchor_bboxes+j *4;
auto gbox = gbboxes+out_index[j] *4;
auto yxhw = cuda_box_minmax_to_cxywh(box);
auto yref = std::get<0>(yxhw);
auto xref = std::get<1>(yxhw);
auto href = std::get<2>(yxhw);
auto wref = std::get<3>(yxhw);
if((href<1E-8) || (wref<1E-8)) {
return;
}
auto gyxhw = cuda_box_minmax_to_cxywh(gbox);
auto feat_cy = std::get<0>(gyxhw);
auto feat_cx = std::get<1>(gyxhw);
auto feat_h = std::get<2>(gyxhw);
auto feat_w = std::get<3>(gyxhw);
outbox[0] = (feat_cy-yref)/(href*prio_scaling[0]);
outbox[1] = (feat_cx-xref)/(wref*prio_scaling[1]);
outbox[2] = log(feat_h/href)/prio_scaling[2];
outbox[3] = log(feat_w/wref)/prio_scaling[3];
}
__global__ void bboxes_decode_kernel(const float* anchor_bboxes,const float* regs,const float* prio_scaling,float* out_bboxes,size_t data_nr)
{
const auto b = threadIdx.x+blockIdx.x *blockDim.x;
if(b>=data_nr) return;
const auto base_offset = b *4;
const auto regs_data = regs+base_offset;
const auto box_data = anchor_bboxes+base_offset;
float y;
float x;
float href;
float wref;
auto xywh = cuda_box_minmax_to_cxywh(box_data);
y = std::get<0>(xywh);
x = std::get<1>(xywh);
href = std::get<2>(xywh);
wref = std::get<3>(xywh);
auto cy = clamp<float>(regs_data[0] *prio_scaling[0],-10.0f,10.0f) *href+y;
auto cx = clamp<float>(regs_data[1] *prio_scaling[1],-10.0f,10.0f) *wref+x;
auto h = href *exp(clamp<float>(regs_data[2] *prio_scaling[2],-10.0,10.0));
auto w = wref *exp(clamp<float>(regs_data[3] *prio_scaling[3],-10.0,10.0));
auto output_data = out_bboxes + base_offset;
const auto minmax = cuda_box_cxywh_to_minmax(cy,cx,h,w);
output_data[0] = clamp<float>(std::get<0>(minmax),0.0,1.0);
output_data[1] = clamp<float>(std::get<1>(minmax),0.0,1.0);
output_data[2] = clamp<float>(std::get<2>(minmax),0.0,1.0);
output_data[3] = clamp<float>(std::get<3>(minmax),0.0,1.0);
if(output_data[0]>output_data[2])
output_data[2] = output_data[0];
if(output_data[1]>output_data[3])
output_data[3] = output_data[1];
}
__host__ void get_encodes(const float* gbboxes,const float* anchor_bboxes,const int* glabels,
float* out_boxes,float* out_scores,int* out_labels,bool* out_remove_indices,int* out_index,const float* prio_scaling,
size_t gb_size,size_t ab_size,float neg_threshold,float pos_threshold,bool max_overlap_as_pos=true)
{
cuda_unique_ptr<int> g_out_index;
if(nullptr == out_index) {
g_out_index = make_cuda_unique<int>(ab_size);
out_index = g_out_index.get();
}
CHECK_OK(cudaMemset(out_boxes,0,sizeof(float)*4*ab_size));
CHECK_OK(cudaMemset(out_scores,0,sizeof(float)*ab_size));
CHECK_OK(cudaMemset(out_index,0xff,sizeof(int)*ab_size));
CHECK_OK(cudaMemset(out_labels,0,sizeof(int)*ab_size));
dim3 grid(ab_size);
dim3 grid1(gb_size);
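	// Launch geometry: one block per anchor box (grid) or per ground-truth
	// box (grid1); the threads of a block cooperate on the strided
	// reductions inside the kernels above.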
auto d_is_boundary_box = make_cuda_unique<bool>((unsigned char)(0x00),ab_size);
cuda_unique_ptr<bool> d_is_max_score = make_cuda_unique<bool>((unsigned char)(0x00),ab_size);
get_scores_and_indexs<<<grid,std::min<size_t>(kBlockSize,gb_size)>>>(gbboxes,anchor_bboxes,out_scores,out_index,d_is_boundary_box.get(),gb_size,ab_size);
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
cudaDeviceSynchronize();
if(max_overlap_as_pos) {
find_max_score_index<<<grid1,std::min<size_t>(kBlockSize,ab_size)>>>(gbboxes,anchor_bboxes,d_is_boundary_box.get(),d_is_max_score.get(),ab_size);
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
cudaDeviceSynchronize();
}
cudaDeviceSynchronize();
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
get_labels_and_remove_indices<<<grid,1>>>(out_index,out_scores,d_is_max_score.get(),glabels,out_labels,out_remove_indices,neg_threshold,pos_threshold);
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
cudaDeviceSynchronize();
cuda_unique_ptr<float> d_prio_scaling = make_cuda_unique<float>(prio_scaling,4);
get_bboxes_regression<<<grid,1>>>(out_boxes,anchor_bboxes,gbboxes,out_labels,out_remove_indices,out_index,d_prio_scaling.get());
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
cudaDeviceSynchronize();
}
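/*
 * Minimal usage sketch for get_encodes (illustrative only; d_gbboxes,
 * d_anchors, d_glabels and the d_out_* names are assumed device
 * allocations of the sizes implied by gb_size/ab_size). Passing nullptr
 * for out_index makes the function allocate that buffer internally:
 *
 *   const float prio_scaling[4] = {0.1f, 0.1f, 0.2f, 0.2f};
 *   get_encodes(d_gbboxes, d_anchors, d_glabels,
 *               d_out_boxes, d_out_scores, d_out_labels,
 *               d_out_remove, nullptr, prio_scaling,
 *               gb_size, ab_size, 0.3f, 0.7f);
 */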
void bboxes_decode_by_gpu(const float* anchor_bboxes,const float* regs,const float* prio_scaling,float* out_bboxes,size_t data_nr)
{
if(0 == data_nr)
return;
cuda_unique_ptr<float> d_prio_scaling = make_cuda_unique<float>(prio_scaling,4);
const auto block_size = std::min<size_t>(data_nr,128);
const auto grid_size = (data_nr+block_size-1)/block_size;
bboxes_decode_kernel<<<grid_size,block_size>>>(anchor_bboxes,regs,d_prio_scaling.get(),out_bboxes,data_nr);
CHECK_CUDA_ERRORS(cudaPeekAtLastError());
cudaDeviceSynchronize();
}
__global__ void boxes_pair_jaccard_kernel(const float* bboxes, float* jaccard, int data_nr)
{
const auto index = threadIdx.x+blockIdx.x *blockDim.x;
const auto src_index = index/data_nr;
const auto dst_index = index%data_nr;
if(index>= data_nr*data_nr)
return;
jaccard[index] = cuda_bboxes_jaccard(bboxes+(src_index<<2),bboxes+(dst_index<<2));
}
void boxes_pair_jaccard(const float* _bboxes,float* _jaccard,int data_nr)
{
auto bboxes = make_cuda_unique(_bboxes,data_nr);
auto jaccard = make_cuda_unique<float>(data_nr*data_nr);
boxes_pair_jaccard_gpu_mem(bboxes.get(),jaccard.get(),data_nr);
CHECK_OK(cudaMemcpy(_jaccard,jaccard.get(),data_nr*data_nr*sizeof(float),cudaMemcpyDeviceToHost));
}
void boxes_pair_jaccard_gpu_mem(const float* bboxes,float* jaccard,int data_nr)
{
const auto grid_size = (data_nr*data_nr+kBlockSize-1)/kBlockSize;
	boxes_pair_jaccard_kernel<<<grid_size,kBlockSize>>>(bboxes,jaccard,data_nr); // use the computed grid so all data_nr*data_nr pairs are covered
cudaDeviceSynchronize();
}
#endif
|
8cd91bf4ee173bc3edc96c69baa4eee889cf460f.hip | // !!! This is a file automatically generated by hipify!!!
#include "headers/headers_mains.h"
#include <helper_cuda.h>
#include "headers/device_bin.h"
#include "headers/device_init.h"
#include "headers/device_dedisperse.h"
#include "headers/device_dedispersion_kernel.h"
#include "headers/device_zero_dm.h"
#include "headers/device_zero_dm_outliers.h"
#include "headers/device_rfi.h"
#include "headers/device_SPS_inplace_kernel.h" //Added by KA
#include "headers/device_SPS_inplace.h" //Added by KA
#include "headers/device_MSD_BLN_grid.h" //Added by KA
#include "headers/device_MSD_BLN_pw.h" //Added by KA
//#include "headers/device_MSD_BLN_pw_dp.h" //Added by KA
#include "headers/device_MSD_grid.h" //Added by KA
#include "headers/device_MSD_plane.h" //Added by KA
#include "headers/device_MSD_limited.h" //Added by KA
#include "headers/device_SNR_limited.h" //Added by KA
#include "headers/device_SPS_long.h" //Added by KA
#include "headers/device_threshold.h" //Added by KA
#include "headers/device_single_FIR.h" //Added by KA
#include "headers/device_analysis.h" //Added by KA
#include "headers/device_periods.h" //Added by KA
#include "headers/device_peak_find.h" //Added by KA
#include "headers/device_power.h"
#include "headers/device_harmonic_summing.h"
#include "headers/device_load_data.h"
#include "headers/device_corner_turn.h"
#include "headers/device_save_data.h"
#include "headers/host_acceleration.h"
#include "headers/host_allocate_memory.h"
#include "headers/host_analysis.h"
#include "headers/host_periods.h"
#include "headers/host_debug.h"
#include "headers/host_get_file_data.h"
#include "headers/host_get_recorded_data.h"
#include "headers/host_get_user_input.h"
#include "headers/host_help.h"
#include "headers/host_rfi.h"
#include "headers/host_stratagy.h"
#include "headers/host_write_file.h"
// fdas
#include "headers/device_acceleration_fdas.h"
#include "headers/host_main_function.h"
#include "headers/params.h"
#include "timer.h"
void main_function
(
int argc,
char* argv[],
// Internal code variables
// File pointers
FILE *fp,
// Counters and flags
int i,
int t,
int dm_range,
int range,
int enable_debug,
int enable_analysis,
int enable_acceleration,
int enable_output_ffdot_plan,
int enable_output_fdas_list,
int enable_periodicity,
int output_dmt,
int enable_zero_dm,
int enable_zero_dm_with_outliers,
int enable_rfi,
int enable_sps_baselinenoise,
int enable_fdas_custom_fft,
int enable_fdas_inbin,
int enable_fdas_norm,
int *inBin,
int *outBin,
int *ndms,
int maxshift,
int max_ndms,
int max_samps,
int num_tchunks,
int total_ndms,
int multi_file,
float max_dm,
// Memory sizes and pointers
size_t inputsize,
size_t outputsize,
size_t gpu_inputsize,
size_t gpu_outputsize,
size_t gpu_memory,
unsigned short *input_buffer,
float ***output_buffer,
unsigned short *d_input,
float *d_output,
float *dmshifts,
float *user_dm_low,
float *user_dm_high,
float *user_dm_step,
float *dm_low,
float *dm_high,
float *dm_step,
// Telescope parameters
int nchans,
int nsamp,
int nbits,
int nsamples,
int nifs,
int **t_processed,
int nboots,
int ntrial_bins,
int navdms,
int nsearch,
float aggression,
float narrow,
float wide,
int maxshift_original,
double tsamp_original,
long int inc,
float tstart,
float tstart_local,
float tsamp,
float fch1,
float foff,
// Analysis variables
float power,
float sigma_cutoff,
float sigma_constant,
float max_boxcar_width_in_sec,
clock_t start_time,
int candidate_algorithm,
int nb_selected_dm,
float *selected_dm_low,
float *selected_dm_high,
int analysis_debug,
int failsafe,
float periodicity_sigma_cutoff,
int periodicity_nHarmonics
)
{
// Initialise the GPU.
init_gpu(argc, argv, enable_debug, &gpu_memory);
if(enable_debug == 1) debug(2, start_time, range, outBin, enable_debug, enable_analysis, output_dmt, multi_file, sigma_cutoff, power, max_ndms, user_dm_low, user_dm_high,
user_dm_step, dm_low, dm_high, dm_step, ndms, nchans, nsamples, nifs, nbits, tsamp, tstart, fch1, foff, maxshift, max_dm, nsamp, gpu_inputsize, gpu_outputsize, inputsize, outputsize);
checkCudaErrors(hipGetLastError());
// Calculate the dedispersion stratagy.
stratagy(&maxshift, &max_samps, &num_tchunks, &max_ndms, &total_ndms, &max_dm, power, nchans, nsamp, fch1, foff, tsamp, range, user_dm_low, user_dm_high, user_dm_step,
&dm_low, &dm_high, &dm_step, &ndms, &dmshifts, inBin, &t_processed, &gpu_memory, Get_memory_requirement_of_SPS());
if(enable_debug == 1) debug(4, start_time, range, outBin, enable_debug, enable_analysis, output_dmt, multi_file, sigma_cutoff, power, max_ndms, user_dm_low, user_dm_high,
user_dm_step, dm_low, dm_high, dm_step, ndms, nchans, nsamples, nifs, nbits, tsamp, tstart, fch1, foff, maxshift, max_dm, nsamp, gpu_inputsize, gpu_outputsize, inputsize, outputsize);
checkCudaErrors(hipGetLastError());
// Allocate memory on host and device.
allocate_memory_cpu_output(&fp, gpu_memory, maxshift, num_tchunks, max_ndms, total_ndms, nsamp, nchans, nbits, range, ndms, t_processed, &input_buffer, &output_buffer, &d_input, &d_output,
&gpu_inputsize, &gpu_outputsize, &inputsize, &outputsize);
if(enable_debug == 1) debug(5, start_time, range, outBin, enable_debug, enable_analysis, output_dmt, multi_file, sigma_cutoff, power, max_ndms, user_dm_low, user_dm_high,
user_dm_step, dm_low, dm_high, dm_step, ndms, nchans, nsamples, nifs, nbits, tsamp, tstart, fch1, foff, maxshift, max_dm, nsamp, gpu_inputsize, gpu_outputsize, inputsize, outputsize);
checkCudaErrors(hipGetLastError());
// Allocate memory on host and device.
allocate_memory_gpu(&fp, gpu_memory, maxshift, num_tchunks, max_ndms, total_ndms, nsamp, nchans, nbits, range, ndms, t_processed, &input_buffer, &output_buffer, &d_input, &d_output,
&gpu_inputsize, &gpu_outputsize, &inputsize, &outputsize);
if(enable_debug == 1) debug(5, start_time, range, outBin, enable_debug, enable_analysis, output_dmt, multi_file, sigma_cutoff, power, max_ndms, user_dm_low, user_dm_high,
user_dm_step, dm_low, dm_high, dm_step, ndms, nchans, nsamples, nifs, nbits, tsamp, tstart, fch1, foff, maxshift, max_dm, nsamp, gpu_inputsize, gpu_outputsize, inputsize, outputsize);
checkCudaErrors(hipGetLastError());
// Clip RFI
//rfi(nsamp, nchans, &input_buffer);
/*
FILE *fp_o;
if ((fp_o=fopen("rfi_clipped.dat", "wb")) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(input_buffer, nchans*nsamp*sizeof(unsigned short), 1, fp_o);
*/
printf("\nDe-dispersing...");
GpuTimer timer;
timer.Start();
tsamp_original = tsamp;
maxshift_original = maxshift;
//float *out_tmp;
//out_tmp = (float *) malloc(( t_processed[0][0] + maxshift ) * max_ndms * sizeof(float));
//memset(out_tmp, 0.0f, t_processed[0][0] + maxshift * max_ndms * sizeof(float));
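	// Process the input in time chunks: each chunk is staged to the GPU,
	// optionally cleaned (zero-DM / RFI), corner-turned, then dedispersed
	// once per DM range, rebinning the data as the ranges get coarser.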
for (t = 0; t < num_tchunks; t++)
{
printf("\nt_processed:\t%d, %d", t_processed[0][t], t);
checkCudaErrors(hipGetLastError());
load_data(-1, inBin, d_input, &input_buffer[(long int) ( inc * nchans )], t_processed[0][t], maxshift, nchans, dmshifts);
checkCudaErrors(hipGetLastError());
if (enable_zero_dm)
{
zero_dm(d_input, nchans, t_processed[0][t]+maxshift);
}
checkCudaErrors(hipGetLastError());
if (enable_zero_dm_with_outliers)
{
zero_dm_outliers(d_input, nchans, t_processed[0][t]+maxshift);
}
checkCudaErrors(hipGetLastError());
corner_turn(d_input, d_output, nchans, t_processed[0][t] + maxshift);
checkCudaErrors(hipGetLastError());
if (enable_rfi)
{
rfi_gpu(d_input, nchans, t_processed[0][t]+maxshift);
}
checkCudaErrors(hipGetLastError());
int oldBin = 1;
for (dm_range = 0; dm_range < range; dm_range++) {
printf("\n\n%f\t%f\t%f\t%d", dm_low[dm_range], dm_high[dm_range], dm_step[dm_range], ndms[dm_range]), fflush(stdout);
printf("\nAmount of telescope time processed: %f", tstart_local);
maxshift = maxshift_original / inBin[dm_range];
checkCudaErrors(hipGetLastError());
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
load_data(dm_range, inBin, d_input, &input_buffer[(long int) ( inc * nchans )], t_processed[dm_range][t], maxshift, nchans, dmshifts);
checkCudaErrors(hipGetLastError());
if (inBin[dm_range] > oldBin)
{
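				// Each extra binning level halves the sample count, so the
				// effective sampling time doubles.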
bin_gpu(d_input, d_output, nchans, t_processed[dm_range - 1][t] + maxshift * inBin[dm_range]);
( tsamp ) = ( tsamp ) * 2.0f;
}
checkCudaErrors(hipGetLastError());
dedisperse(dm_range, t_processed[dm_range][t], inBin, dmshifts, d_input, d_output, nchans, ( t_processed[dm_range][t] + maxshift ), maxshift, &tsamp, dm_low, dm_high, dm_step, ndms, nbits, failsafe);
checkCudaErrors(hipGetLastError());
if ( (enable_acceleration == 1) || (enable_periodicity == 1) || (analysis_debug ==1) )
{
// gpu_outputsize = ndms[dm_range] * ( t_processed[dm_range][t] ) * sizeof(float);
//save_data(d_output, out_tmp, gpu_outputsize);
//#pragma omp parallel for
for (int k = 0; k < ndms[dm_range]; k++)
{
//memcpy(&output_buffer[dm_range][k][inc / inBin[dm_range]], &out_tmp[k * t_processed[dm_range][t]], sizeof(float) * t_processed[dm_range][t]);
save_data_offset(d_output, k * t_processed[dm_range][t], output_buffer[dm_range][k], inc / inBin[dm_range], sizeof(float) * t_processed[dm_range][t]);
}
// save_data(d_output, &output_buffer[dm_range][0][((long int)inc)/inBin[dm_range]], gpu_outputsize);
}
if (output_dmt == 1)
{
//for (int k = 0; k < ndms[dm_range]; k++)
// write_output(dm_range, t_processed[dm_range][t], ndms[dm_range], gpu_memory, output_buffer[dm_range][k], gpu_outputsize, dm_low, dm_high);
//write_output(dm_range, t_processed[dm_range][t], ndms[dm_range], gpu_memory, out_tmp, gpu_outputsize, dm_low, dm_high);
}
checkCudaErrors(hipGetLastError());
if (enable_analysis == 1) {
printf("\n VALUE OF ANALYSIS DEBUG IS %d\n", analysis_debug);
if (analysis_debug == 1)
{
float *out_tmp;
gpu_outputsize = ndms[dm_range] * ( t_processed[dm_range][t] ) * sizeof(float);
out_tmp = (float *) malloc(( t_processed[0][0] + maxshift ) * max_ndms * sizeof(float));
					memset(out_tmp, 0, ( t_processed[0][0] + maxshift ) * max_ndms * sizeof(float)); // memset takes a byte value, and the size term needs parentheses
save_data(d_output, out_tmp, gpu_outputsize);
analysis_CPU(dm_range, tstart_local, t_processed[dm_range][t], (t_processed[dm_range][t]+maxshift), nchans, maxshift, max_ndms, ndms, outBin, sigma_cutoff, out_tmp,dm_low, dm_high, dm_step, tsamp, max_boxcar_width_in_sec);
free(out_tmp);
}
else
{
float *h_peak_list;
size_t max_peak_size;
size_t peak_pos;
max_peak_size = (size_t) ( ndms[dm_range]*t_processed[dm_range][t]/2 );
h_peak_list = (float*) malloc(max_peak_size*4*sizeof(float));
peak_pos=0;
analysis_GPU(h_peak_list, &peak_pos, max_peak_size, dm_range, tstart_local, t_processed[dm_range][t], inBin[dm_range], outBin[dm_range], &maxshift, max_ndms, ndms, sigma_cutoff, sigma_constant, max_boxcar_width_in_sec, d_output, dm_low, dm_high, dm_step, tsamp, candidate_algorithm, enable_sps_baselinenoise);
free(h_peak_list);
}
// This is for testing purposes and should be removed or commented out
//analysis_CPU(dm_range, tstart_local, t_processed[dm_range][t], (t_processed[dm_range][t]+maxshift), nchans, maxshift, max_ndms, ndms, outBin, sigma_cutoff, out_tmp,dm_low, dm_high, dm_step, tsamp);
}
oldBin = inBin[dm_range];
}
//memset(out_tmp, 0.0f, t_processed[0][0] + maxshift * max_ndms * sizeof(float));
inc = inc + t_processed[0][t];
printf("\nINC:\t%ld", inc);
tstart_local = ( tsamp_original * inc );
tsamp = tsamp_original;
maxshift = maxshift_original;
}
timer.Stop();
float time = timer.Elapsed() / 1000;
printf("\n\n === OVERALL DEDISPERSION THROUGHPUT INCLUDING SYNCS AND DATA TRANSFERS ===\n");
printf("\n(Performed Brute-Force Dedispersion: %g (GPU estimate)", time);
printf("\nAmount of telescope time processed: %f", tstart_local);
printf("\nNumber of samples processed: %ld", inc);
printf("\nReal-time speedup factor: %lf", ( tstart_local ) / time);
hipFree(d_input);
hipFree(d_output);
//free(out_tmp);
free(input_buffer);
double time_processed = ( tstart_local ) / tsamp_original;
double dm_t_processed = time_processed * total_ndms;
double all_processed = dm_t_processed * nchans;
printf("\nGops based on %.2lf ops per channel per tsamp: %f", NOPS, ( ( NOPS * all_processed ) / ( time ) ) / 1000000000.0);
int num_reg = SNUMREG;
float num_threads = total_ndms * ( t_processed[0][0] ) / ( num_reg );
float data_size_loaded = ( num_threads * nchans * sizeof(ushort) ) / 1000000000;
float time_in_sec = time;
float bandwidth = data_size_loaded / time_in_sec;
printf("\nDevice global memory bandwidth in GB/s: %f", bandwidth);
printf("\nDevice shared memory bandwidth in GB/s: %f", bandwidth * ( num_reg ));
float size_gb = ( nchans * ( t_processed[0][0] ) * sizeof(float) * 8 ) / 1000000000.0;
printf("\nTelescope data throughput in Gb/s: %f", size_gb / time_in_sec);
if (enable_periodicity == 1)
{
//
GpuTimer timer;
timer.Start();
//
GPU_periodicity(range, nsamp, max_ndms, inc, periodicity_sigma_cutoff, output_buffer, ndms, inBin, dm_low, dm_high, dm_step, tsamp_original, periodicity_nHarmonics, candidate_algorithm, enable_sps_baselinenoise, sigma_constant);
//
timer.Stop();
float time = timer.Elapsed()/1000;
printf("\n\n === OVERALL PERIODICITY THROUGHPUT INCLUDING SYNCS AND DATA TRANSFERS ===\n");
printf("\nPerformed Peroidicity Location: %f (GPU estimate)", time);
printf("\nAmount of telescope time processed: %f", tstart_local);
printf("\nNumber of samples processed: %ld", inc);
printf("\nReal-time speedup factor: %f", ( tstart_local ) / ( time ));
}
if (enable_acceleration == 1)
{
// Input needed for fdas is output_buffer which is DDPlan
// Assumption: gpu memory is free and available
//
GpuTimer timer;
timer.Start();
// acceleration(range, nsamp, max_ndms, inc, nboots, ntrial_bins, navdms, narrow, wide, nsearch, aggression, sigma_cutoff, output_buffer, ndms, inBin, dm_low, dm_high, dm_step, tsamp_original);
acceleration_fdas(range, nsamp, max_ndms, inc, nboots, ntrial_bins, navdms, narrow, wide, nsearch, aggression, sigma_cutoff,
output_buffer, ndms, inBin, dm_low, dm_high, dm_step, tsamp_original, enable_fdas_custom_fft, enable_fdas_inbin, enable_fdas_norm, sigma_constant, enable_output_ffdot_plan, enable_output_fdas_list);
//
timer.Stop();
float time = timer.Elapsed()/1000;
printf("\n\n === OVERALL TDAS THROUGHPUT INCLUDING SYNCS AND DATA TRANSFERS ===\n");
printf("\nPerformed Acceleration Location: %lf (GPU estimate)", time);
printf("\nAmount of telescope time processed: %f", tstart_local);
printf("\nNumber of samples processed: %ld", inc);
printf("\nReal-time speedup factor: %lf", ( tstart_local ) / ( time ));
}
}
| 8cd91bf4ee173bc3edc96c69baa4eee889cf460f.cu | #include "headers/headers_mains.h"
#include <helper_cuda.h>
#include "headers/device_bin.h"
#include "headers/device_init.h"
#include "headers/device_dedisperse.h"
#include "headers/device_dedispersion_kernel.h"
#include "headers/device_zero_dm.h"
#include "headers/device_zero_dm_outliers.h"
#include "headers/device_rfi.h"
#include "headers/device_SPS_inplace_kernel.h" //Added by KA
#include "headers/device_SPS_inplace.h" //Added by KA
#include "headers/device_MSD_BLN_grid.h" //Added by KA
#include "headers/device_MSD_BLN_pw.h" //Added by KA
//#include "headers/device_MSD_BLN_pw_dp.h" //Added by KA
#include "headers/device_MSD_grid.h" //Added by KA
#include "headers/device_MSD_plane.h" //Added by KA
#include "headers/device_MSD_limited.h" //Added by KA
#include "headers/device_SNR_limited.h" //Added by KA
#include "headers/device_SPS_long.h" //Added by KA
#include "headers/device_threshold.h" //Added by KA
#include "headers/device_single_FIR.h" //Added by KA
#include "headers/device_analysis.h" //Added by KA
#include "headers/device_periods.h" //Added by KA
#include "headers/device_peak_find.h" //Added by KA
#include "headers/device_power.h"
#include "headers/device_harmonic_summing.h"
#include "headers/device_load_data.h"
#include "headers/device_corner_turn.h"
#include "headers/device_save_data.h"
#include "headers/host_acceleration.h"
#include "headers/host_allocate_memory.h"
#include "headers/host_analysis.h"
#include "headers/host_periods.h"
#include "headers/host_debug.h"
#include "headers/host_get_file_data.h"
#include "headers/host_get_recorded_data.h"
#include "headers/host_get_user_input.h"
#include "headers/host_help.h"
#include "headers/host_rfi.h"
#include "headers/host_stratagy.h"
#include "headers/host_write_file.h"
// fdas
#include "headers/device_acceleration_fdas.h"
#include "headers/host_main_function.h"
#include "headers/params.h"
#include "timer.h"
void main_function
(
int argc,
char* argv[],
// Internal code variables
// File pointers
FILE *fp,
// Counters and flags
int i,
int t,
int dm_range,
int range,
int enable_debug,
int enable_analysis,
int enable_acceleration,
int enable_output_ffdot_plan,
int enable_output_fdas_list,
int enable_periodicity,
int output_dmt,
int enable_zero_dm,
int enable_zero_dm_with_outliers,
int enable_rfi,
int enable_sps_baselinenoise,
int enable_fdas_custom_fft,
int enable_fdas_inbin,
int enable_fdas_norm,
int *inBin,
int *outBin,
int *ndms,
int maxshift,
int max_ndms,
int max_samps,
int num_tchunks,
int total_ndms,
int multi_file,
float max_dm,
// Memory sizes and pointers
size_t inputsize,
size_t outputsize,
size_t gpu_inputsize,
size_t gpu_outputsize,
size_t gpu_memory,
unsigned short *input_buffer,
float ***output_buffer,
unsigned short *d_input,
float *d_output,
float *dmshifts,
float *user_dm_low,
float *user_dm_high,
float *user_dm_step,
float *dm_low,
float *dm_high,
float *dm_step,
// Telescope parameters
int nchans,
int nsamp,
int nbits,
int nsamples,
int nifs,
int **t_processed,
int nboots,
int ntrial_bins,
int navdms,
int nsearch,
float aggression,
float narrow,
float wide,
int maxshift_original,
double tsamp_original,
long int inc,
float tstart,
float tstart_local,
float tsamp,
float fch1,
float foff,
// Analysis variables
float power,
float sigma_cutoff,
float sigma_constant,
float max_boxcar_width_in_sec,
clock_t start_time,
int candidate_algorithm,
int nb_selected_dm,
float *selected_dm_low,
float *selected_dm_high,
int analysis_debug,
int failsafe,
float periodicity_sigma_cutoff,
int periodicity_nHarmonics
)
{
// Initialise the GPU.
init_gpu(argc, argv, enable_debug, &gpu_memory);
if(enable_debug == 1) debug(2, start_time, range, outBin, enable_debug, enable_analysis, output_dmt, multi_file, sigma_cutoff, power, max_ndms, user_dm_low, user_dm_high,
user_dm_step, dm_low, dm_high, dm_step, ndms, nchans, nsamples, nifs, nbits, tsamp, tstart, fch1, foff, maxshift, max_dm, nsamp, gpu_inputsize, gpu_outputsize, inputsize, outputsize);
checkCudaErrors(cudaGetLastError());
// Calculate the dedispersion stratagy.
stratagy(&maxshift, &max_samps, &num_tchunks, &max_ndms, &total_ndms, &max_dm, power, nchans, nsamp, fch1, foff, tsamp, range, user_dm_low, user_dm_high, user_dm_step,
&dm_low, &dm_high, &dm_step, &ndms, &dmshifts, inBin, &t_processed, &gpu_memory, Get_memory_requirement_of_SPS());
if(enable_debug == 1) debug(4, start_time, range, outBin, enable_debug, enable_analysis, output_dmt, multi_file, sigma_cutoff, power, max_ndms, user_dm_low, user_dm_high,
user_dm_step, dm_low, dm_high, dm_step, ndms, nchans, nsamples, nifs, nbits, tsamp, tstart, fch1, foff, maxshift, max_dm, nsamp, gpu_inputsize, gpu_outputsize, inputsize, outputsize);
checkCudaErrors(cudaGetLastError());
// Allocate memory on host and device.
allocate_memory_cpu_output(&fp, gpu_memory, maxshift, num_tchunks, max_ndms, total_ndms, nsamp, nchans, nbits, range, ndms, t_processed, &input_buffer, &output_buffer, &d_input, &d_output,
&gpu_inputsize, &gpu_outputsize, &inputsize, &outputsize);
if(enable_debug == 1) debug(5, start_time, range, outBin, enable_debug, enable_analysis, output_dmt, multi_file, sigma_cutoff, power, max_ndms, user_dm_low, user_dm_high,
user_dm_step, dm_low, dm_high, dm_step, ndms, nchans, nsamples, nifs, nbits, tsamp, tstart, fch1, foff, maxshift, max_dm, nsamp, gpu_inputsize, gpu_outputsize, inputsize, outputsize);
checkCudaErrors(cudaGetLastError());
// Allocate memory on host and device.
allocate_memory_gpu(&fp, gpu_memory, maxshift, num_tchunks, max_ndms, total_ndms, nsamp, nchans, nbits, range, ndms, t_processed, &input_buffer, &output_buffer, &d_input, &d_output,
&gpu_inputsize, &gpu_outputsize, &inputsize, &outputsize);
if(enable_debug == 1) debug(5, start_time, range, outBin, enable_debug, enable_analysis, output_dmt, multi_file, sigma_cutoff, power, max_ndms, user_dm_low, user_dm_high,
user_dm_step, dm_low, dm_high, dm_step, ndms, nchans, nsamples, nifs, nbits, tsamp, tstart, fch1, foff, maxshift, max_dm, nsamp, gpu_inputsize, gpu_outputsize, inputsize, outputsize);
checkCudaErrors(cudaGetLastError());
// Clip RFI
//rfi(nsamp, nchans, &input_buffer);
/*
FILE *fp_o;
if ((fp_o=fopen("rfi_clipped.dat", "wb")) == NULL) {
fprintf(stderr, "Error opening output file!\n");
exit(0);
}
fwrite(input_buffer, nchans*nsamp*sizeof(unsigned short), 1, fp_o);
*/
printf("\nDe-dispersing...");
GpuTimer timer;
timer.Start();
tsamp_original = tsamp;
maxshift_original = maxshift;
//float *out_tmp;
//out_tmp = (float *) malloc(( t_processed[0][0] + maxshift ) * max_ndms * sizeof(float));
//memset(out_tmp, 0.0f, t_processed[0][0] + maxshift * max_ndms * sizeof(float));
for (t = 0; t < num_tchunks; t++)
{
printf("\nt_processed:\t%d, %d", t_processed[0][t], t);
checkCudaErrors(cudaGetLastError());
load_data(-1, inBin, d_input, &input_buffer[(long int) ( inc * nchans )], t_processed[0][t], maxshift, nchans, dmshifts);
checkCudaErrors(cudaGetLastError());
if (enable_zero_dm)
{
zero_dm(d_input, nchans, t_processed[0][t]+maxshift);
}
checkCudaErrors(cudaGetLastError());
if (enable_zero_dm_with_outliers)
{
zero_dm_outliers(d_input, nchans, t_processed[0][t]+maxshift);
}
checkCudaErrors(cudaGetLastError());
corner_turn(d_input, d_output, nchans, t_processed[0][t] + maxshift);
checkCudaErrors(cudaGetLastError());
if (enable_rfi)
{
rfi_gpu(d_input, nchans, t_processed[0][t]+maxshift);
}
checkCudaErrors(cudaGetLastError());
int oldBin = 1;
for (dm_range = 0; dm_range < range; dm_range++) {
printf("\n\n%f\t%f\t%f\t%d", dm_low[dm_range], dm_high[dm_range], dm_step[dm_range], ndms[dm_range]), fflush(stdout);
printf("\nAmount of telescope time processed: %f", tstart_local);
maxshift = maxshift_original / inBin[dm_range];
checkCudaErrors(cudaGetLastError());
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
load_data(dm_range, inBin, d_input, &input_buffer[(long int) ( inc * nchans )], t_processed[dm_range][t], maxshift, nchans, dmshifts);
checkCudaErrors(cudaGetLastError());
if (inBin[dm_range] > oldBin)
{
bin_gpu(d_input, d_output, nchans, t_processed[dm_range - 1][t] + maxshift * inBin[dm_range]);
( tsamp ) = ( tsamp ) * 2.0f;
}
checkCudaErrors(cudaGetLastError());
dedisperse(dm_range, t_processed[dm_range][t], inBin, dmshifts, d_input, d_output, nchans, ( t_processed[dm_range][t] + maxshift ), maxshift, &tsamp, dm_low, dm_high, dm_step, ndms, nbits, failsafe);
checkCudaErrors(cudaGetLastError());
if ( (enable_acceleration == 1) || (enable_periodicity == 1) || (analysis_debug ==1) )
{
// gpu_outputsize = ndms[dm_range] * ( t_processed[dm_range][t] ) * sizeof(float);
//save_data(d_output, out_tmp, gpu_outputsize);
//#pragma omp parallel for
for (int k = 0; k < ndms[dm_range]; k++)
{
//memcpy(&output_buffer[dm_range][k][inc / inBin[dm_range]], &out_tmp[k * t_processed[dm_range][t]], sizeof(float) * t_processed[dm_range][t]);
save_data_offset(d_output, k * t_processed[dm_range][t], output_buffer[dm_range][k], inc / inBin[dm_range], sizeof(float) * t_processed[dm_range][t]);
}
// save_data(d_output, &output_buffer[dm_range][0][((long int)inc)/inBin[dm_range]], gpu_outputsize);
}
if (output_dmt == 1)
{
//for (int k = 0; k < ndms[dm_range]; k++)
// write_output(dm_range, t_processed[dm_range][t], ndms[dm_range], gpu_memory, output_buffer[dm_range][k], gpu_outputsize, dm_low, dm_high);
//write_output(dm_range, t_processed[dm_range][t], ndms[dm_range], gpu_memory, out_tmp, gpu_outputsize, dm_low, dm_high);
}
checkCudaErrors(cudaGetLastError());
if (enable_analysis == 1) {
printf("\n VALUE OF ANALYSIS DEBUG IS %d\n", analysis_debug);
if (analysis_debug == 1)
{
float *out_tmp;
gpu_outputsize = ndms[dm_range] * ( t_processed[dm_range][t] ) * sizeof(float);
out_tmp = (float *) malloc(( t_processed[0][0] + maxshift ) * max_ndms * sizeof(float));
					memset(out_tmp, 0, ( t_processed[0][0] + maxshift ) * max_ndms * sizeof(float)); // memset takes a byte value, and the size term needs parentheses
save_data(d_output, out_tmp, gpu_outputsize);
analysis_CPU(dm_range, tstart_local, t_processed[dm_range][t], (t_processed[dm_range][t]+maxshift), nchans, maxshift, max_ndms, ndms, outBin, sigma_cutoff, out_tmp,dm_low, dm_high, dm_step, tsamp, max_boxcar_width_in_sec);
free(out_tmp);
}
else
{
float *h_peak_list;
size_t max_peak_size;
size_t peak_pos;
max_peak_size = (size_t) ( ndms[dm_range]*t_processed[dm_range][t]/2 );
h_peak_list = (float*) malloc(max_peak_size*4*sizeof(float));
peak_pos=0;
analysis_GPU(h_peak_list, &peak_pos, max_peak_size, dm_range, tstart_local, t_processed[dm_range][t], inBin[dm_range], outBin[dm_range], &maxshift, max_ndms, ndms, sigma_cutoff, sigma_constant, max_boxcar_width_in_sec, d_output, dm_low, dm_high, dm_step, tsamp, candidate_algorithm, enable_sps_baselinenoise);
free(h_peak_list);
}
// This is for testing purposes and should be removed or commented out
//analysis_CPU(dm_range, tstart_local, t_processed[dm_range][t], (t_processed[dm_range][t]+maxshift), nchans, maxshift, max_ndms, ndms, outBin, sigma_cutoff, out_tmp,dm_low, dm_high, dm_step, tsamp);
}
oldBin = inBin[dm_range];
}
//memset(out_tmp, 0.0f, t_processed[0][0] + maxshift * max_ndms * sizeof(float));
inc = inc + t_processed[0][t];
printf("\nINC:\t%ld", inc);
tstart_local = ( tsamp_original * inc );
tsamp = tsamp_original;
maxshift = maxshift_original;
}
timer.Stop();
float time = timer.Elapsed() / 1000;
printf("\n\n === OVERALL DEDISPERSION THROUGHPUT INCLUDING SYNCS AND DATA TRANSFERS ===\n");
printf("\n(Performed Brute-Force Dedispersion: %g (GPU estimate)", time);
printf("\nAmount of telescope time processed: %f", tstart_local);
printf("\nNumber of samples processed: %ld", inc);
printf("\nReal-time speedup factor: %lf", ( tstart_local ) / time);
cudaFree(d_input);
cudaFree(d_output);
//free(out_tmp);
free(input_buffer);
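	// Throughput bookkeeping: total samples x DM trials x channels processed,
	// used below to estimate Gops and effective memory bandwidth.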
double time_processed = ( tstart_local ) / tsamp_original;
double dm_t_processed = time_processed * total_ndms;
double all_processed = dm_t_processed * nchans;
printf("\nGops based on %.2lf ops per channel per tsamp: %f", NOPS, ( ( NOPS * all_processed ) / ( time ) ) / 1000000000.0);
int num_reg = SNUMREG;
float num_threads = total_ndms * ( t_processed[0][0] ) / ( num_reg );
float data_size_loaded = ( num_threads * nchans * sizeof(ushort) ) / 1000000000;
float time_in_sec = time;
float bandwidth = data_size_loaded / time_in_sec;
printf("\nDevice global memory bandwidth in GB/s: %f", bandwidth);
printf("\nDevice shared memory bandwidth in GB/s: %f", bandwidth * ( num_reg ));
float size_gb = ( nchans * ( t_processed[0][0] ) * sizeof(float) * 8 ) / 1000000000.0;
printf("\nTelescope data throughput in Gb/s: %f", size_gb / time_in_sec);
if (enable_periodicity == 1)
{
//
GpuTimer timer;
timer.Start();
//
GPU_periodicity(range, nsamp, max_ndms, inc, periodicity_sigma_cutoff, output_buffer, ndms, inBin, dm_low, dm_high, dm_step, tsamp_original, periodicity_nHarmonics, candidate_algorithm, enable_sps_baselinenoise, sigma_constant);
//
timer.Stop();
float time = timer.Elapsed()/1000;
printf("\n\n === OVERALL PERIODICITY THROUGHPUT INCLUDING SYNCS AND DATA TRANSFERS ===\n");
printf("\nPerformed Peroidicity Location: %f (GPU estimate)", time);
printf("\nAmount of telescope time processed: %f", tstart_local);
printf("\nNumber of samples processed: %ld", inc);
printf("\nReal-time speedup factor: %f", ( tstart_local ) / ( time ));
}
if (enable_acceleration == 1)
{
// Input needed for fdas is output_buffer which is DDPlan
// Assumption: gpu memory is free and available
//
GpuTimer timer;
timer.Start();
// acceleration(range, nsamp, max_ndms, inc, nboots, ntrial_bins, navdms, narrow, wide, nsearch, aggression, sigma_cutoff, output_buffer, ndms, inBin, dm_low, dm_high, dm_step, tsamp_original);
acceleration_fdas(range, nsamp, max_ndms, inc, nboots, ntrial_bins, navdms, narrow, wide, nsearch, aggression, sigma_cutoff,
output_buffer, ndms, inBin, dm_low, dm_high, dm_step, tsamp_original, enable_fdas_custom_fft, enable_fdas_inbin, enable_fdas_norm, sigma_constant, enable_output_ffdot_plan, enable_output_fdas_list);
//
timer.Stop();
float time = timer.Elapsed()/1000;
printf("\n\n === OVERALL TDAS THROUGHPUT INCLUDING SYNCS AND DATA TRANSFERS ===\n");
printf("\nPerformed Acceleration Location: %lf (GPU estimate)", time);
printf("\nAmount of telescope time processed: %f", tstart_local);
printf("\nNumber of samples processed: %ld", inc);
printf("\nReal-time speedup factor: %lf", ( tstart_local ) / ( time ));
}
}
|
8b02c036c39ac5ee5460a853a2226174d0d685e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "global.h"
Mandelbrot_cu::Mandelbrot_cu()
{
// set xmin, xmax, ymin, ymax to default values.
this->ComplexXMin = -2.00;
this->ComplexXMax = 1;
this->ComplexYMin = -1.5;
this->ComplexYMax = 1.5;
}
__global__ void CalcPoint( float *x, float *y, int *scheme, int nx, int ny, int maxIter )
{
// get index into x array
int index = threadIdx.x + blockIdx.x * blockDim.x;
if ( index < nx )
{
int start = index*ny;
int end = start + ny;
// iterate the column values corresponding to this row
for( int i = start; i < end; i++ )
{
// x0 and y0 to be added onto points
double zx0 = x[index];
double zy0 = y[i];
double zx = x[index];
double zy = y[i];
int count = 0;
// determine convergence/divergence
while( ( zx*zx + zy*zy <= 4.0 ) && ( count < maxIter ) )
{
// complex square
double new_zx = zx*zx - zy*zy;
double new_zy = 2 * zx * zy;
zx = new_zx;
zy = new_zy;
// add z0
zx += zx0;
zy += zy0;
// incr count
count++;
}
// set color
if( count >= maxIter )
{
scheme[i] = 0;
}
else if ( count > ( maxIter / 8) )
{
scheme[i] = 2;
}
else if ( count > ( maxIter / 10) )
{
scheme[i] = 3;
}
else if ( count > ( maxIter / 20) )
{
scheme[i] = 4;
}
else if ( count > ( maxIter / 40) )
{
scheme[i] = 5;
}
else if ( count > ( maxIter / 100) )
{
scheme[i] = 6;
}
else if ( count > (maxIter / 200) )
{
scheme[i] = 7;
}
else if ( count > (maxIter / 400) )
{
scheme[i] = 8;
}
else if ( count > (maxIter / 600) )
{
scheme[i] = 9;
}
else if ( count > (maxIter / 800) )
{
scheme[i] = 1;
}
else
{
scheme[i] = 10;
}
}
}
}
vector< ComplexPoint > Mandelbrot_cu::GetPoints( int nx, int ny, int maxIter )
{
vector< ComplexPoint > points;
// determine array size and allocate memory
int size_nx = nx * sizeof( float );
int size_nynx = ( nx*ny ) * sizeof( float );
int size_sch = ( nx*ny ) * sizeof( int );
float *x = ( float * )malloc(size_nx);
float *y = ( float * )malloc(size_nynx);
int *scheme = ( int *)malloc(size_sch);
// fill arrays with points before passing
ComplexWidth = ComplexXMax - ComplexXMin;
ComplexHeight = ComplexYMax - ComplexYMin;
ComplexPoint z, zIncr;
// calculates x and y increments
zIncr.x = ComplexWidth / float( nx );
zIncr.y = ComplexHeight / float( ny );
for( int i = 0; i < nx; i++ )
{
// get and set complex x value
x[i] = ComplexXMin + ( zIncr.x * i );
int multiplier = 0;
for( int j = i*ny; j < (i+1)*ny; j++ )
{
// get and set complex y value (and default scheme)
y[j] = ComplexYMin + ( zIncr.y * multiplier );
scheme[j] = 0;
multiplier++;
}
}
// Do host side CUDA prep and run kernel on CUDA device
// create device vectors
float *d_x, *d_y;
int *d_scheme;
// allocate memory on the device for our arrays
hipMalloc( ( void** )&d_x, size_nx );
hipMalloc( ( void** )&d_y, size_nynx );
hipMalloc( ( void** )&d_scheme, size_sch );
// copy memory from host to the device
hipMemcpy( d_x, x, size_nx, hipMemcpyHostToDevice );
hipMemcpy( d_y, y, size_nynx, hipMemcpyHostToDevice );
hipMemcpy( d_scheme, scheme, size_sch, hipMemcpyHostToDevice );
	// set the number of threads and calculate the number of blocks
int nThreads = 64;
int nBlocks = ( nx + nThreads - 1 ) / nThreads;
// calculate scheme indexes on the GPU
hipLaunchKernelGGL(( CalcPoint), dim3(nBlocks), dim3(nThreads) , 0, 0, d_x, d_y, d_scheme, nx, ny, maxIter );
// copy arrays back to host from GPU
hipMemcpy( x, d_x, size_nx, hipMemcpyDeviceToHost );
hipMemcpy( y, d_y, size_nynx, hipMemcpyDeviceToHost );
hipMemcpy( scheme, d_scheme, size_sch, hipMemcpyDeviceToHost );
// create points from the x, y and scheme values and push onto vector
for( int i = 0; i < nx; i++ )
{
z.x = x[i];
for( int j = i*ny; j < (i+1)*ny; j++ )
{
z.y = y[j];
z.schemeIndex = scheme[j];
points.push_back(z);
}
}
// free memory
free(x); free(y); free(scheme);
hipFree(d_x); hipFree(d_y); hipFree(d_scheme);
return points;
}
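// Hedged usage sketch (hypothetical caller, not part of the original file):
// render one point per pixel of a 512x512 view with a 1000-iteration cap;
// the grid size and iteration limit are illustrative values only.
static vector< ComplexPoint > RenderDefaultView_example()
{
	Mandelbrot_cu mandelbrot;                      // default complex-plane bounds
	return mandelbrot.GetPoints( 512, 512, 1000 ); // nx, ny, maxIter
}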
// Getters and setters for xmin, xmax, ymin, and ymax
double Mandelbrot_cu::GetComplexXMin()
{
return this->ComplexXMin;
}
double Mandelbrot_cu::GetComplexXMax()
{
return this->ComplexXMax;
}
double Mandelbrot_cu::GetComplexYMin()
{
return this->ComplexYMin;
}
double Mandelbrot_cu::GetComplexYMax()
{
return this->ComplexYMax;
}
void Mandelbrot_cu::SetComplexXMin( double xmin )
{
this->ComplexXMin = xmin;
}
void Mandelbrot_cu::SetComplexXMax( double xmax )
{
this->ComplexXMax = xmax;
}
void Mandelbrot_cu::SetComplexYMin( double ymin )
{
this->ComplexYMin = ymin;
}
void Mandelbrot_cu::SetComplexYMax( double ymax )
{
this->ComplexYMax = ymax;
}
| 8b02c036c39ac5ee5460a853a2226174d0d685e1.cu | #include "global.h"
Mandelbrot_cu::Mandelbrot_cu()
{
// set xmin, xmax, ymin, ymax to default values.
this->ComplexXMin = -2.00;
this->ComplexXMax = 1;
this->ComplexYMin = -1.5;
this->ComplexYMax = 1.5;
}
__global__ void CalcPoint( float *x, float *y, int *scheme, int nx, int ny, int maxIter )
{
// get index into x array
int index = threadIdx.x + blockIdx.x * blockDim.x;
if ( index < nx )
{
int start = index*ny;
int end = start + ny;
// iterate the column values corresponding to this row
for( int i = start; i < end; i++ )
{
// x0 and y0 to be added onto points
double zx0 = x[index];
double zy0 = y[i];
double zx = x[index];
double zy = y[i];
int count = 0;
// determine convergence/divergence
while( ( zx*zx + zy*zy <= 4.0 ) && ( count < maxIter ) )
{
// complex square
double new_zx = zx*zx - zy*zy;
double new_zy = 2 * zx * zy;
zx = new_zx;
zy = new_zy;
// add z0
zx += zx0;
zy += zy0;
// incr count
count++;
}
// set color
if( count >= maxIter )
{
scheme[i] = 0;
}
else if ( count > ( maxIter / 8) )
{
scheme[i] = 2;
}
else if ( count > ( maxIter / 10) )
{
scheme[i] = 3;
}
else if ( count > ( maxIter / 20) )
{
scheme[i] = 4;
}
else if ( count > ( maxIter / 40) )
{
scheme[i] = 5;
}
else if ( count > ( maxIter / 100) )
{
scheme[i] = 6;
}
else if ( count > (maxIter / 200) )
{
scheme[i] = 7;
}
else if ( count > (maxIter / 400) )
{
scheme[i] = 8;
}
else if ( count > (maxIter / 600) )
{
scheme[i] = 9;
}
else if ( count > (maxIter / 800) )
{
scheme[i] = 1;
}
else
{
scheme[i] = 10;
}
}
}
}
vector< ComplexPoint > Mandelbrot_cu::GetPoints( int nx, int ny, int maxIter )
{
vector< ComplexPoint > points;
// determine array size and allocate memory
int size_nx = nx * sizeof( float );
int size_nynx = ( nx*ny ) * sizeof( float );
int size_sch = ( nx*ny ) * sizeof( int );
float *x = ( float * )malloc(size_nx);
float *y = ( float * )malloc(size_nynx);
int *scheme = ( int *)malloc(size_sch);
// fill arrays with points before passing
ComplexWidth = ComplexXMax - ComplexXMin;
ComplexHeight = ComplexYMax - ComplexYMin;
ComplexPoint z, zIncr;
// calculates x and y increments
zIncr.x = ComplexWidth / float( nx );
zIncr.y = ComplexHeight / float( ny );
for( int i = 0; i < nx; i++ )
{
// get and set complex x value
x[i] = ComplexXMin + ( zIncr.x * i );
int multiplier = 0;
for( int j = i*ny; j < (i+1)*ny; j++ )
{
// get and set complex y value (and default scheme)
y[j] = ComplexYMin + ( zIncr.y * multiplier );
scheme[j] = 0;
multiplier++;
}
}
// Do host side CUDA prep and run kernel on CUDA device
// create device vectors
float *d_x, *d_y;
int *d_scheme;
// allocate memory on the device for our arrays
cudaMalloc( ( void** )&d_x, size_nx );
cudaMalloc( ( void** )&d_y, size_nynx );
cudaMalloc( ( void** )&d_scheme, size_sch );
// copy memory from host to the device
cudaMemcpy( d_x, x, size_nx, cudaMemcpyHostToDevice );
cudaMemcpy( d_y, y, size_nynx, cudaMemcpyHostToDevice );
cudaMemcpy( d_scheme, scheme, size_sch, cudaMemcpyHostToDevice );
	// set the number of threads and calculate the number of blocks
int nThreads = 64;
int nBlocks = ( nx + nThreads - 1 ) / nThreads;
// calculate scheme indexes on the GPU
CalcPoint<<< nBlocks, nThreads >>>( d_x, d_y, d_scheme, nx, ny, maxIter );
// copy arrays back to host from GPU
cudaMemcpy( x, d_x, size_nx, cudaMemcpyDeviceToHost );
cudaMemcpy( y, d_y, size_nynx, cudaMemcpyDeviceToHost );
cudaMemcpy( scheme, d_scheme, size_sch, cudaMemcpyDeviceToHost );
// create points from the x, y and scheme values and push onto vector
for( int i = 0; i < nx; i++ )
{
z.x = x[i];
for( int j = i*ny; j < (i+1)*ny; j++ )
{
z.y = y[j];
z.schemeIndex = scheme[j];
points.push_back(z);
}
}
// free memory
free(x); free(y); free(scheme);
cudaFree(d_x); cudaFree(d_y); cudaFree(d_scheme);
return points;
}
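// Hedged sketch (hypothetical helper, not called by the original code): the
// kernel launch above is unchecked; a caller could query its status with the
// standard CUDA runtime API like this.
static cudaError_t LastLaunchStatus_example()
{
	return cudaGetLastError(); // cudaSuccess if CalcPoint launched cleanly
}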
// Getters and setters for xmin, xmax, ymin, and ymax
double Mandelbrot_cu::GetComplexXMin()
{
return this->ComplexXMin;
}
double Mandelbrot_cu::GetComplexXMax()
{
return this->ComplexXMax;
}
double Mandelbrot_cu::GetComplexYMin()
{
return this->ComplexYMin;
}
double Mandelbrot_cu::GetComplexYMax()
{
return this->ComplexYMax;
}
void Mandelbrot_cu::SetComplexXMin( double xmin )
{
this->ComplexXMin = xmin;
}
void Mandelbrot_cu::SetComplexXMax( double xmax )
{
this->ComplexXMax = xmax;
}
void Mandelbrot_cu::SetComplexYMin( double ymin )
{
this->ComplexYMin = ymin;
}
void Mandelbrot_cu::SetComplexYMax( double ymax )
{
this->ComplexYMax = ymax;
}
|
48d89204890866c5a365c8775a3cdbc75b12face.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* ___ _ _ ___ _ __ __ _ ___
* / __| | | | \ /_\ | \/ | /_\ | _ \
* | (__| |_| | |) / _ \ | |\/| |/ _ \| _/
* \___|\___/|___/_/_\_\_|_|__|_/_/_\_\_|_ ___
* / __| | | | _ \ __| _ \___| _ \ __/ __|
* \__ \ |_| | _/ _|| /___| / _|\__ \
* |___/\___/|_| |___|_|_\ |_|_\___|___/
* 2012
*
* by Jens Wetzl ([email protected])
* and Oliver Taubmann ([email protected])
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
**/
#include "cudalbfgs_error_checking.h"
#include <stdio.h>
#include <cmath>
namespace ReductionGPU
{
template <class T, unsigned int blockSize>
__global__ void
sumReduction(const T *g_idata, T *g_odata, const unsigned int width,
const unsigned int height, const unsigned int ld);
}
namespace Reduction
{
template <class T>
void sumReduction(const T *d_data, const unsigned int width, const unsigned int height,
const unsigned int ld, T *d_result, T *d_odata)
{
const unsigned int n = width*height;
const int numThreads = 512; // has to be power of 2
const int numBlocks = (n % (2*numThreads) == 0)
? n / (2*numThreads)
: n / (2*numThreads) + 1;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (numThreads <= 32) ? 2 * numThreads * sizeof(T) : numThreads * sizeof(T);
if (!d_odata)
CudaSafeCall( hipMalloc((void**) &d_odata, numBlocks * sizeof(T)) );
hipLaunchKernelGGL(( ReductionGPU::sumReduction<T, numThreads>), dim3(dimGrid), dim3(dimBlock), smemSize , 0, d_data, d_odata, width, height, ld);
CudaCheckError();
hipDeviceSynchronize();
hipLaunchKernelGGL(( ReductionGPU::sumReduction<T, numThreads>), dim3(1), dim3(dimBlock), smemSize , 0, d_odata, d_result, numBlocks, 1, numBlocks);
CudaCheckError();
hipDeviceSynchronize();
}
}
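// Hedged usage sketch (illustrative names, not from the original code base):
// sum a dense, contiguous device matrix, i.e. one whose leading dimension
// equals its width. Passing nullptr as the scratch buffer makes sumReduction
// allocate the per-block partial-sum array itself.
template <class T>
void sumDense_example(const T *d_data, unsigned int width, unsigned int height,
                      T *d_result)
{
	Reduction::sumReduction<T>(d_data, width, height, width, d_result, nullptr);
}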
namespace ReductionGPU
{
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T*()
{
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const
{
extern __shared__ int __smem[];
return (T*)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double*()
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
__device__ inline operator const double*() const
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
};
template <class T, unsigned int blockSize>
__global__ void
sumReduction(const T *g_idata, T *g_odata, const unsigned int width,
const unsigned int height, const unsigned int ld)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
const unsigned int n = width*height;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
const unsigned int y = i / width;
const unsigned int x = i - y*width;
mySum += g_idata[y*ld + x];
// ensure we don't read out of bounds
if (i + blockSize < n)
{
const unsigned int y2 = (i+blockSize) / width;
const unsigned int x2 = (i+blockSize) - y2*width;
mySum += g_idata[y2*ld + x2];
}
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T* smem = sdata;
if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
}
| 48d89204890866c5a365c8775a3cdbc75b12face.cu | /**
* ___ _ _ ___ _ __ __ _ ___
* / __| | | | \ /_\ | \/ | /_\ | _ \
* | (__| |_| | |) / _ \ | |\/| |/ _ \| _/
* \___|\___/|___/_/_\_\_|_|__|_/_/_\_\_|_ ___
* / __| | | | _ \ __| _ \___| _ \ __/ __|
* \__ \ |_| | _/ _|| /___| / _|\__ \
* |___/\___/|_| |___|_|_\ |_|_\___|___/
* 2012
*
* by Jens Wetzl ([email protected])
* and Oliver Taubmann ([email protected])
*
* This work is licensed under a Creative Commons
* Attribution 3.0 Unported License. (CC-BY)
* http://creativecommons.org/licenses/by/3.0/
*
**/
#include "cudalbfgs_error_checking.h"
#include <stdio.h>
#include <cmath>
namespace ReductionGPU
{
template <class T, unsigned int blockSize>
__global__ void
sumReduction(const T *g_idata, T *g_odata, const unsigned int width,
const unsigned int height, const unsigned int ld);
}
namespace Reduction
{
template <class T>
void sumReduction(const T *d_data, const unsigned int width, const unsigned int height,
const unsigned int ld, T *d_result, T *d_odata)
{
const unsigned int n = width*height;
const int numThreads = 512; // has to be power of 2
const int numBlocks = (n % (2*numThreads) == 0)
? n / (2*numThreads)
: n / (2*numThreads) + 1;
dim3 dimBlock(numThreads, 1, 1);
dim3 dimGrid(numBlocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (numThreads <= 32) ? 2 * numThreads * sizeof(T) : numThreads * sizeof(T);
if (!d_odata)
CudaSafeCall( cudaMalloc((void**) &d_odata, numBlocks * sizeof(T)) );
ReductionGPU::sumReduction<T, numThreads><<< dimGrid, dimBlock, smemSize >>>(d_data, d_odata, width, height, ld);
CudaCheckError();
cudaDeviceSynchronize();
ReductionGPU::sumReduction<T, numThreads><<< 1, dimBlock, smemSize >>>(d_odata, d_result, numBlocks, 1, numBlocks);
CudaCheckError();
cudaDeviceSynchronize();
}
}
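// Hedged sketch (illustrative names): when the same problem size recurs, the
// per-block scratch buffer can be allocated once and reused. Treating the
// data as a 1 x n row, d_scratch must hold at least ceil(n / (2*512)) entries,
// matching numBlocks inside sumReduction above.
static void sumWithScratch_example(const float *d_data, unsigned int n,
                                   float *d_result, float *d_scratch)
{
	Reduction::sumReduction<float>(d_data, n, 1, n, d_result, d_scratch);
}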
namespace ReductionGPU
{
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
template<class T>
struct SharedMemory
{
__device__ inline operator T*()
{
extern __shared__ int __smem[];
return (T*)__smem;
}
__device__ inline operator const T*() const
{
extern __shared__ int __smem[];
return (T*)__smem;
}
};
// specialize for double to avoid unaligned memory
// access compile errors
template<>
struct SharedMemory<double>
{
__device__ inline operator double*()
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
__device__ inline operator const double*() const
{
extern __shared__ double __smem_d[];
return (double*)__smem_d;
}
};
template <class T, unsigned int blockSize>
__global__ void
sumReduction(const T *g_idata, T *g_odata, const unsigned int width,
const unsigned int height, const unsigned int ld)
{
T *sdata = SharedMemory<T>();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x;
unsigned int gridSize = blockSize*2*gridDim.x;
const unsigned int n = width*height;
T mySum = 0;
// we reduce multiple elements per thread. The number is determined by the
// number of active thread blocks (via gridDim). More blocks will result
// in a larger gridSize and therefore fewer elements per thread
while (i < n)
{
const unsigned int y = i / width;
const unsigned int x = i - y*width;
mySum += g_idata[y*ld + x];
// ensure we don't read out of bounds
if (i + blockSize < n)
{
const unsigned int y2 = (i+blockSize) / width;
const unsigned int x2 = (i+blockSize) - y2*width;
mySum += g_idata[y2*ld + x2];
}
i += gridSize;
}
// each thread puts its local sum into shared memory
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile T* smem = sdata;
if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; }
if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; }
if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; }
if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; }
if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; }
if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; }
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
}
|
5be3be5392eb972ddf6377d606a9cf3b30f551d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <windows.h>
#include <helper_cuda.h>
#include <helper_string.h>
#include <time.h>
__global__ void pBP(float *recc, int *lineind, float *datacq, int * guid, int lz, int ly, int Sx, int blockx, int blocky, int nblockx, int nblocky){
int jj = blockIdx.x;
int j = threadIdx.x;
jj=jj+blocky*nblocky;
j=j+blockx*nblockx;
for(int i =0; i < (lz-1); i++ ) {
for(int ii = guid[i]-1 ; ii < guid[i+1] ; ii++ ) {
recc[(j*(lz-1)*ly)+(jj*(lz-1))+i] = recc[(j*(lz-1)*ly)+(jj*(lz-1))+i] + datacq [int( lineind[ii] ) - 1 + j + jj * Sx ];
}
	}
}
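// Hedged sketch (hypothetical helper, not called by the program): the number
// of voxel addresses pBP sums at depth i, matching its inner loop bounds
// ii = guid[i]-1 .. guid[i+1]-1 over the 1-based guide array.
static int addressesAtDepth_example(const int *guidint, int i)
{
	return guidint[i + 1] - guidint[i] + 1;
}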
#define N 512
int main( void ) {
hipDeviceReset();
// managing and gathering host memory //
	FILE *fpunt; // pointer for writing to and reading from the hard drive
double *Nsx; // Number of elements from the acquired data
double *datacq; //sinogram
	float *datacqf; //sinogram in float precision (less memory, better for the GPU, although double is not that bad...). Could come like this from MATLAB. I KNOW.
	double *lineind; // addresses of the voxels to be summed in a line of voxels following the transducer axis
int *lineindint; // addresses of the voxels to be summed in int type (less memory)
double *guid; // guide to read the voxels addresses.
int *guidint;
int nblockx; int nblocky; // number of blocks in x direction, y direction.
int blockx; int blocky; // block being reconstructed.
	int lbx; int lby; // Number of lines per block (number of parallel threads = lbx*lby)
float *recc; // reconstruction
double *lxyz; //vector containing the size of the array of addresses and the size of the acquired data
int Sx; int Sy; int Sz; // Size of the acquired data
	int lx; int ly; int lz; // number of voxels in the reconstruction grid
	int lindex; // length of the array containing the memory addresses of the voxels to be summed
int *A; // variable to guide the parallel for in the cuda function
Nsx = (double *)malloc(sizeof(double) * 3);
lxyz= (double *)malloc(sizeof(double) * 4);
int zlim; // limit for each iteration of the reconstructor
int dum; // "helper" variable
double dumm; // "helper" variable
dum=3;
printf("Loading data and copying it to the GPU memory\n");
// Gathering the size of the acquired data //
fpunt = fopen("path", "rb");
fread(Nsx, sizeof(double),dum, fpunt);
fclose(fpunt);
Sx=int(Nsx[0]); Sy=int(Nsx[1]); Sz=int(Nsx[2]);
datacq = (double *)calloc(sizeof(double),Sx*Sy*Sz);
fpunt = fopen("path", "rb");
fread(datacq, sizeof(double),Sx*Sy*Sz, fpunt);
fclose(fpunt);
datacqf = (float *)calloc(sizeof(float),Sx*Sy*Sz);
for(int i=0; i<Sx*Sy*Sz; i++){
datacqf[i]=float(datacq[i]);
}
free(datacq);
//Gathering the size of the vector containing the memory addresses and acquired data without zeros
dum=4;
fpunt = fopen("path", "rb");
fread(lxyz, sizeof(double),dum, fpunt);
fclose(fpunt);
ly=int(lxyz[0]); lx=int(lxyz[1]); lz=int(lxyz[2]); lindex=int(lxyz[3]);
//Gathering the vector of addresses
	lineind = (double *)malloc(sizeof(double)*lindex); //vector of voxel addresses
fpunt = fopen("path", "rb");
fread(lineind, sizeof(double),lindex, fpunt);
fclose(fpunt);
lineindint = (int *)calloc(sizeof(int),lindex);
for(int i=0; i<lindex; i++){
lineindint[i]=int(lineind[i]);
}
free(lineind);
//Gathering the guide of the vector of addresses
	guid = (double*)malloc(sizeof(double)*lz); //guide into the address vector
fpunt = fopen("path", "rb");
fread(guid, sizeof(double),lz, fpunt);
fclose(fpunt);
	guidint = (int*)malloc(sizeof(int)*lz); //guide in int type
for(int i=0;i<lz;i++){
guidint[i]=int(guid[i]);
}
free(guid);
// allocating memory for the reconstruction
recc = ( float* ) calloc( sizeof( float), ( lz-1 ) * lx * ly );
	// changing classes to save memory
// managing and gathering device memory//
int *devA;
float *devrecc;
int *devlineind;
float *devdatacq;
int *devguid;
hipMalloc( (void**)&devrecc, sizeof(float)* ( lz-1 ) * lx * ly);
hipMemcpy( devrecc, recc,sizeof(float)* ( lz-1 ) * lx * ly, hipMemcpyHostToDevice);
hipMalloc( (void**)&devlineind, sizeof(int)*lindex );
hipMemcpy( devlineind, lineindint,sizeof(int)*lindex, hipMemcpyHostToDevice);
hipMalloc( (void**)&devdatacq, sizeof(float)*Sx*Sy*Sz);
hipMemcpy( devdatacq, datacqf,sizeof(float)*Sx*Sy*Sz, hipMemcpyHostToDevice);
hipMalloc( (void**)&devguid, sizeof(int)*lz);
hipMemcpy( devguid, guidint,sizeof(int)*lz, hipMemcpyHostToDevice);
//************************ setting the number of lines to be reconstructed in parallel (lbx*lby) *************************//
lbx=50;
lby=50;
//***********************************************************************************************************************//
nblockx= int(floor(double(lx)/double(lbx))); // number of blocks in x direction
nblocky= int(floor(double(ly)/double(lby))); // number of blocks in y direction
dim3 blocks(lbx, 1); // conditioning cuda memory blocks
dim3 grids(lby, 1); //
	//dim3 blocks(lx, 1); // conditioning cuda memory blocks
//dim3 grids(ly, 1);
	int dumblock=0; // dummy variable to count the number of blocks already reconstructed
printf("Reconstructing\n");
//for(blockx=0; blockx<nblockx; blockx++) {
blockx=0;
blocky=0;
clock_t tic = clock();
for(blockx=0; blockx<nblockx; blockx++) {
for(blocky=0;blocky<nblocky; blocky++) {
hipLaunchKernelGGL(( pBP), dim3(grids),dim3(blocks) , 0, 0, devrecc, devlineind, devdatacq, devguid, lz,ly, Sx, blockx,blocky,lbx,lby);
hipDeviceSynchronize();
dumblock+=1;
printf("block %d out of %d reconstructed \n", dumblock,nblockx*nblocky);
}
}
clock_t toc = clock();
printf("Elapsed: %f seconds\n", (double)(toc - tic) / CLOCKS_PER_SEC);
printf("reconstruction finished, saving data\n");
hipMemcpy( recc, devrecc, sizeof(float)* ( lz-1 ) * lx * ly, hipMemcpyDeviceToHost);
hipFree( devrecc);
hipFree( devlineind);
hipFree( devdatacq);
hipFree( devguid);
double* reccd = ( double* ) calloc( sizeof( double ), ( lz-1 ) * lx * ly );
for(int i=0; i<( lz-1 ) * lx * ly ;i++){
reccd[i]=double(recc[i]);
}
fpunt = fopen("path","w b");
if (fpunt == NULL)
{
printf("The file did not open");
}
fwrite (reccd, sizeof(double),lx*ly*(lz-1),fpunt);
fclose(fpunt);
printf("Fisnished! press any key");
getchar();
} | 5be3be5392eb972ddf6377d606a9cf3b30f551d1.cu |
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <windows.h>
#include <helper_cuda.h>
#include <helper_string.h>
#include <time.h>
__global__ void pBP(float *recc, int *lineind, float *datacq, int * guid, int lz, int ly, int Sx, int blockx, int blocky, int nblockx, int nblocky){
int jj = blockIdx.x;
int j = threadIdx.x;
jj=jj+blocky*nblocky;
j=j+blockx*nblockx;
for(int i =0; i < (lz-1); i++ ) {
for(int ii = guid[i]-1 ; ii < guid[i+1] ; ii++ ) {
recc[(j*(lz-1)*ly)+(jj*(lz-1))+i] = recc[(j*(lz-1)*ly)+(jj*(lz-1))+i] + datacq [int( lineind[ii] ) - 1 + j + jj * Sx ];
}
	}
}
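// Hedged sketch (hypothetical helper): bytes of the reconstruction volume
// that pBP accumulates into, matching the cudaMalloc for devrecc in main.
static size_t reccBytes_example(int lx, int ly, int lz)
{
	return sizeof(float) * (size_t)(lz - 1) * (size_t)lx * (size_t)ly;
}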
#define N 512
int main( void ) {
cudaDeviceReset();
// managing and gathering host memory //
	FILE *fpunt; // pointer for writing to and reading from the hard drive
double *Nsx; // Number of elements from the acquired data
double *datacq; //sinogram
	float *datacqf; //sinogram in float precision (less memory, better for the GPU, although double is not that bad...). Could come like this from MATLAB. I KNOW.
	double *lineind; // addresses of the voxels to be summed in a line of voxels following the transducer axis
int *lineindint; // addresses of the voxels to be summed in int type (less memory)
double *guid; // guide to read the voxels addresses.
int *guidint;
int nblockx; int nblocky; // number of blocks in x direction, y direction.
int blockx; int blocky; // block being reconstructed.
	int lbx; int lby; // Number of lines per block (number of parallel threads = lbx*lby)
float *recc; // reconstruction
double *lxyz; //vector containing the size of the array of addresses and the size of the acquired data
int Sx; int Sy; int Sz; // Size of the acquired data
	int lx; int ly; int lz; // number of voxels in the reconstruction grid
	int lindex; // length of the array containing the memory addresses of the voxels to be summed
int *A; // variable to guide the parallel for in the cuda function
Nsx = (double *)malloc(sizeof(double) * 3);
lxyz= (double *)malloc(sizeof(double) * 4);
int zlim; // limit for each iteration of the reconstructor
int dum; // "helper" variable
double dumm; // "helper" variable
dum=3;
printf("Loading data and copying it to the GPU memory\n");
// Gathering the size of the acquired data //
fpunt = fopen("path", "rb");
fread(Nsx, sizeof(double),dum, fpunt);
fclose(fpunt);
Sx=int(Nsx[0]); Sy=int(Nsx[1]); Sz=int(Nsx[2]);
datacq = (double *)calloc(sizeof(double),Sx*Sy*Sz);
fpunt = fopen("path", "rb");
fread(datacq, sizeof(double),Sx*Sy*Sz, fpunt);
fclose(fpunt);
datacqf = (float *)calloc(sizeof(float),Sx*Sy*Sz);
for(int i=0; i<Sx*Sy*Sz; i++){
datacqf[i]=float(datacq[i]);
}
free(datacq);
//Gathering the size of the vector containing the memory addresses and acquired data without zeros
dum=4;
fpunt = fopen("path", "rb");
fread(lxyz, sizeof(double),dum, fpunt);
fclose(fpunt);
ly=int(lxyz[0]); lx=int(lxyz[1]); lz=int(lxyz[2]); lindex=int(lxyz[3]);
//Gathering the vector of addresses
	lineind = (double *)malloc(sizeof(double)*lindex); //vector of voxel addresses
fpunt = fopen("path", "rb");
fread(lineind, sizeof(double),lindex, fpunt);
fclose(fpunt);
lineindint = (int *)calloc(sizeof(int),lindex);
for(int i=0; i<lindex; i++){
lineindint[i]=int(lineind[i]);
}
free(lineind);
//Gathering the guide of the vector of addresses
	guid = (double*)malloc(sizeof(double)*lz); //guide into the address vector
fpunt = fopen("path", "rb");
fread(guid, sizeof(double),lz, fpunt);
fclose(fpunt);
	guidint = (int*)malloc(sizeof(int)*lz); //guide in int type
for(int i=0;i<lz;i++){
guidint[i]=int(guid[i]);
}
free(guid);
// allocating memory for the reconstruction
recc = ( float* ) calloc( sizeof( float), ( lz-1 ) * lx * ly );
	// changing classes to save memory
// managing and gathering device memory//
int *devA;
float *devrecc;
int *devlineind;
float *devdatacq;
int *devguid;
cudaMalloc( (void**)&devrecc, sizeof(float)* ( lz-1 ) * lx * ly);
cudaMemcpy( devrecc, recc,sizeof(float)* ( lz-1 ) * lx * ly, cudaMemcpyHostToDevice);
cudaMalloc( (void**)&devlineind, sizeof(int)*lindex );
cudaMemcpy( devlineind, lineindint,sizeof(int)*lindex, cudaMemcpyHostToDevice);
cudaMalloc( (void**)&devdatacq, sizeof(float)*Sx*Sy*Sz);
cudaMemcpy( devdatacq, datacqf,sizeof(float)*Sx*Sy*Sz, cudaMemcpyHostToDevice);
cudaMalloc( (void**)&devguid, sizeof(int)*lz);
cudaMemcpy( devguid, guidint,sizeof(int)*lz, cudaMemcpyHostToDevice);
//************************ setting the number of lines to be reconstructed in parallel (lbx*lby) *************************//
lbx=50;
lby=50;
//***********************************************************************************************************************//
nblockx= int(floor(double(lx)/double(lbx))); // number of blocks in x direction
nblocky= int(floor(double(ly)/double(lby))); // number of blocks in y direction
dim3 blocks(lbx, 1); // conditioning cuda memory blocks
dim3 grids(lby, 1); //
	//dim3 blocks(lx, 1); // conditioning cuda memory blocks
//dim3 grids(ly, 1);
	int dumblock=0; // dummy variable to count the number of blocks already reconstructed
printf("Reconstructing\n");
//for(blockx=0; blockx<nblockx; blockx++) {
blockx=0;
blocky=0;
clock_t tic = clock();
for(blockx=0; blockx<nblockx; blockx++) {
for(blocky=0;blocky<nblocky; blocky++) {
pBP<<< grids,blocks >>>(devrecc, devlineind, devdatacq, devguid, lz,ly, Sx, blockx,blocky,lbx,lby);
cudaDeviceSynchronize();
dumblock+=1;
printf("block %d out of %d reconstructed \n", dumblock,nblockx*nblocky);
}
}
clock_t toc = clock();
printf("Elapsed: %f seconds\n", (double)(toc - tic) / CLOCKS_PER_SEC);
printf("reconstruction finished, saving data\n");
cudaMemcpy( recc, devrecc, sizeof(float)* ( lz-1 ) * lx * ly, cudaMemcpyDeviceToHost);
cudaFree( devrecc);
cudaFree( devlineind);
cudaFree( devdatacq);
cudaFree( devguid);
double* reccd = ( double* ) calloc( sizeof( double ), ( lz-1 ) * lx * ly );
for(int i=0; i<( lz-1 ) * lx * ly ;i++){
reccd[i]=double(recc[i]);
}
fpunt = fopen("path","w b");
if (fpunt == NULL)
{
printf("The file did not open");
}
fwrite (reccd, sizeof(double),lx*ly*(lz-1),fpunt);
fclose(fpunt);
printf("Fisnished! press any key");
getchar();
} |
6fffd86956cbcd265ecfb2d739b12c3b26789131.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "util.h"
#include <stdio.h>
#include <math.h>
#include <time.h>
extern void printAllCUDASpecs(hipDeviceProp_t& deviceProp);
void initPrint();
/*
* Matrix Multiplication
* C = AB
*/
#define TILE_WIDTH 32
__global__ void matmul_rec_glob(float* A, float* B, float* C, long long N, long long K, long long M);
__global__ void matmul_rec_shar(float* A, float* B, float* C, long long N, long long K, long long M);
void execute_matmul_rec_glob(float *A, float *B, float *C);
void execute_matmul_rec_shar(float* A, float* B, float* C);
void execute_serial(float* A, float* B, float* C);
const long long N = (1 << 10);
const long long K = (1 << 10);
const long long M = (1 << 10);
const int numIterations = 10;
int tpb, tpb_sqrt;
int main()
{
initPrint();
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
printf("Executing with N=%lld, K=%lld, M=%lld\n", N, M, K);
float *A, *B, *C;
hipMallocManaged(&A, N * K * sizeof(float));
hipMallocManaged(&B, K * M * sizeof(float));
hipMallocManaged(&C, N * M * sizeof(float));
tpb = deviceProp.maxThreadsPerBlock;
tpb_sqrt = sqrt(tpb) + 1e-9;
execute_matmul_rec_glob(A, B, C);
execute_matmul_rec_shar(A, B, C);
execute_serial(A, B, C);
hipFree(A);
hipFree(B);
hipFree(C);
return 0;
}
void initPrint() {
int device;
int deviceCount;
hipGetDeviceCount(&deviceCount);
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, device);
printf("\nDevice %d: \"%s\"\n", device, deviceProp.name);
printAllCUDASpecs(deviceProp);
}
}
__global__ void matmul_rec_glob(float* A, float* B, float* C, long long N, long long K, long long M) {
int r = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
if (r < N && c < M) {
int pos = r * M + c;
C[pos] = 0;
for (int i = 0; i < K; i++) {
C[pos] += A[r*K+i] * B[i*M+c];
}
}
}
__global__ void matmul_rec_shar(float* A, float* B, float* C, long long N, long long K, long long M) {
int r = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float A_tiled[TILE_WIDTH][TILE_WIDTH];
__shared__ float B_tiled[TILE_WIDTH][TILE_WIDTH];
if (r < N && c < M) {
int pos = r * M + c;
C[pos] = 0;
for (int i = 0; i < K; i += TILE_WIDTH) {
//collab loading
if (i + threadIdx.y < K) {
A_tiled[threadIdx.x][threadIdx.y] = A[r * K + i + threadIdx.y];
}
if (i + threadIdx.x < K) {
B_tiled[threadIdx.x][threadIdx.y] = B[(i + threadIdx.x) * M + c];
}
__syncthreads();
for (int j = 0; j < TILE_WIDTH && i + j < K; j++) {
C[pos] += A_tiled[threadIdx.x][j] * B_tiled[j][threadIdx.y];
}
__syncthreads();
}
}
}
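// Hedged sketch (hypothetical helper, never called): a straightforward CPU
// dot product for one (r, c) entry, usable to spot-check either kernel's
// output against the row-major layouts A (NxK) and B (KxM).
static float referenceEntry_example(const float* A, const float* B,
                                    long long r, long long c) {
	float acc = 0.0f;
	for (long long k = 0; k < K; k++) {
		acc += A[r * K + k] * B[k * M + c];
	}
	return acc;
}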
__host__ void execute_matmul_rec_glob(float* A, float* B, float* C) {
int N_dim = (N + tpb_sqrt - 1) / tpb_sqrt;
int M_dim = (M + tpb_sqrt - 1) / tpb_sqrt;
dim3 threadsPerBlock(tpb_sqrt, tpb_sqrt);
dim3 blocksPerGrid(N_dim, M_dim);
printf("kernel_matmul_rec_glob:\n");
clock_t start, end;
float totalTime = 0;
for (int i = 0; i < numIterations; i++) {
randomize(A, N);
randomize(B, N);
start = clock();
hipLaunchKernelGGL(( matmul_rec_glob), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, A, B, C, N, K, M);
hipDeviceSynchronize();
end = clock();
totalTime += (end - start);
}
printf("Average time elapsed: %fms\n\n", totalTime * 1000.0 / CLOCKS_PER_SEC / numIterations);
}
__host__ void execute_matmul_rec_shar(float* A, float* B, float* C) {
int N_dim = (N + TILE_WIDTH - 1) / TILE_WIDTH;
int M_dim = (M + TILE_WIDTH - 1) / TILE_WIDTH;
dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
dim3 blocksPerGrid(N_dim, M_dim);
printf("kernel_matmul_rec_shar:\n");
clock_t start, end;
float totalTime = 0;
for (int i = 0; i < numIterations; i++) {
randomize(A, N);
randomize(B, N);
start = clock();
hipLaunchKernelGGL(( matmul_rec_shar), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, A, B, C, N, K, M);
hipDeviceSynchronize();
end = clock();
totalTime += (end - start);
}
printf("Average time elapsed: %fms\n\n", totalTime * 1000.0 / CLOCKS_PER_SEC / numIterations);
}
__host__ void execute_serial(float* A, float* B, float* C) {
printf("serial:\n");
clock_t start, end;
float totalTime = 0;
for (int i = 0; i < numIterations; i++) {
		randomize(A, N);
		randomize(B, N);
start = clock();
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
int pos = i * M + j;
C[pos] = 0;
for (int k = 0; k < K; k++) {
C[pos] += A[i*K+k] * B[k*M+j];
}
}
}
end = clock();
totalTime += (end - start);
}
printf("Average time elapsed: %fms\n\n", totalTime * 1000.0 / CLOCKS_PER_SEC / numIterations);
}
| 6fffd86956cbcd265ecfb2d739b12c3b26789131.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "util.h"
#include <stdio.h>
#include <math.h>
#include <time.h>
extern void printAllCUDASpecs(cudaDeviceProp& deviceProp);
void initPrint();
/*
* Matrix Multiplication
* C = AB
*/
#define TILE_WIDTH 32
__global__ void matmul_rec_glob(float* A, float* B, float* C, long long N, long long K, long long M);
__global__ void matmul_rec_shar(float* A, float* B, float* C, long long N, long long K, long long M);
void execute_matmul_rec_glob(float *A, float *B, float *C);
void execute_matmul_rec_shar(float* A, float* B, float* C);
void execute_serial(float* A, float* B, float* C);
const long long N = (1 << 10);
const long long K = (1 << 10);
const long long M = (1 << 10);
const int numIterations = 10;
int tpb, tpb_sqrt;
int main()
{
initPrint();
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
printf("Executing with N=%lld, K=%lld, M=%lld\n", N, M, K);
float *A, *B, *C;
cudaMallocManaged(&A, N * K * sizeof(float));
cudaMallocManaged(&B, K * M * sizeof(float));
cudaMallocManaged(&C, N * M * sizeof(float));
tpb = deviceProp.maxThreadsPerBlock;
tpb_sqrt = sqrt(tpb) + 1e-9;
execute_matmul_rec_glob(A, B, C);
execute_matmul_rec_shar(A, B, C);
execute_serial(A, B, C);
cudaFree(A);
cudaFree(B);
cudaFree(C);
return 0;
}
void initPrint() {
int device;
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, device);
printf("\nDevice %d: \"%s\"\n", device, deviceProp.name);
printAllCUDASpecs(deviceProp);
}
}
__global__ void matmul_rec_glob(float* A, float* B, float* C, long long N, long long K, long long M) {
int r = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
if (r < N && c < M) {
int pos = r * M + c;
C[pos] = 0;
for (int i = 0; i < K; i++) {
C[pos] += A[r*K+i] * B[i*M+c];
}
}
}
__global__ void matmul_rec_shar(float* A, float* B, float* C, long long N, long long K, long long M) {
int r = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float A_tiled[TILE_WIDTH][TILE_WIDTH];
__shared__ float B_tiled[TILE_WIDTH][TILE_WIDTH];
if (r < N && c < M) {
int pos = r * M + c;
C[pos] = 0;
for (int i = 0; i < K; i += TILE_WIDTH) {
//collab loading
if (i + threadIdx.y < K) {
A_tiled[threadIdx.x][threadIdx.y] = A[r * K + i + threadIdx.y];
}
if (i + threadIdx.x < K) {
B_tiled[threadIdx.x][threadIdx.y] = B[(i + threadIdx.x) * M + c];
}
__syncthreads();
for (int j = 0; j < TILE_WIDTH && i + j < K; j++) {
C[pos] += A_tiled[threadIdx.x][j] * B_tiled[j][threadIdx.y];
}
__syncthreads();
}
}
}
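// Hedged sketch (hypothetical helper): the ceil-division used by the execute_*
// functions below when sizing one launch-grid dimension.
static int gridDim1D_example(long long extent, int tileWidth) {
	return (int)((extent + tileWidth - 1) / tileWidth); // ceil(extent / tileWidth)
}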
__host__ void execute_matmul_rec_glob(float* A, float* B, float* C) {
int N_dim = (N + tpb_sqrt - 1) / tpb_sqrt;
int M_dim = (M + tpb_sqrt - 1) / tpb_sqrt;
dim3 threadsPerBlock(tpb_sqrt, tpb_sqrt);
dim3 blocksPerGrid(N_dim, M_dim);
printf("kernel_matmul_rec_glob:\n");
clock_t start, end;
float totalTime = 0;
for (int i = 0; i < numIterations; i++) {
randomize(A, N);
randomize(B, N);
start = clock();
matmul_rec_glob<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N, K, M);
cudaDeviceSynchronize();
end = clock();
totalTime += (end - start);
}
printf("Average time elapsed: %fms\n\n", totalTime * 1000.0 / CLOCKS_PER_SEC / numIterations);
}
__host__ void execute_matmul_rec_shar(float* A, float* B, float* C) {
int N_dim = (N + TILE_WIDTH - 1) / TILE_WIDTH;
int M_dim = (M + TILE_WIDTH - 1) / TILE_WIDTH;
dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
dim3 blocksPerGrid(N_dim, M_dim);
printf("kernel_matmul_rec_shar:\n");
clock_t start, end;
float totalTime = 0;
for (int i = 0; i < numIterations; i++) {
randomize(A, N);
randomize(B, N);
start = clock();
matmul_rec_shar<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, N, K, M);
cudaDeviceSynchronize();
end = clock();
totalTime += (end - start);
}
printf("Average time elapsed: %fms\n\n", totalTime * 1000.0 / CLOCKS_PER_SEC / numIterations);
}
__host__ void execute_serial(float* A, float* B, float* C) {
printf("serial:\n");
clock_t start, end;
float totalTime = 0;
for (int i = 0; i < numIterations; i++) {
		randomize(A, N);
		randomize(B, N);
start = clock();
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
int pos = i * M + j;
C[pos] = 0;
for (int k = 0; k < K; k++) {
C[pos] += A[i*K+k] * B[k*M+j];
}
}
}
end = clock();
totalTime += (end - start);
}
printf("Average time elapsed: %fms\n\n", totalTime * 1000.0 / CLOCKS_PER_SEC / numIterations);
}
|
d6b63691eda7aa54332392ed2c4c67edb8a57145.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2017 XGBoost contributors
*/
#include <xgboost/tree_updater.h>
#include <utility>
#include <vector>
#include "../../../src/tree/param.h"
#include "updater_gpu_common.cuh"
namespace xgboost {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu);
/**
* @brief Absolute BFS order IDs to col-wise unique IDs based on user input
* @param tid the index of the element that this thread should access
* @param abs the array of absolute IDs
* @param colIds the array of column IDs for each element
* @param nodeStart the start of the node ID at this level
* @param nKeys number of nodes at this level.
* @return the uniq key
*/
static HOST_DEV_INLINE node_id_t abs2uniqKey(int tid, const node_id_t* abs,
const int* colIds, node_id_t nodeStart,
int nKeys) {
int a = abs[tid];
if (a == UNUSED_NODE) return a;
return ((a - nodeStart) + (colIds[tid] * nKeys));
}
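// Hedged worked example (illustrative numbers): at a level with nodeStart = 3
// and nKeys = 4, an element in column 2 assigned to absolute node 5 maps to
// the unique key (5 - 3) + 2 * 4 = 10, i.e. keys are node-major within each
// per-column block of nKeys slots.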
/**
* @struct Pair
 * @brief Pair used for key based scan operations on bst_gpair
*/
struct Pair {
int key;
bst_gpair value;
};
/** define a key that's not used at all in the entire boosting process */
static const int NONE_KEY = -100;
/**
 * @brief Compute the number of per-block scratch entries needed by the
 * scan-by-key operations (one partial sum and one key per block)
 * @param size number of elements that will be scanned
 * @return the number of scratch entries, i.e. the number of blocks
*/
template <int BLKDIM_L1L3 = 256>
int scanTempBufferSize(int size) {
int nBlks = dh::div_round_up(size, BLKDIM_L1L3);
return nBlks;
}
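// Hedged usage sketch (illustrative names): sizing the scratch arrays that
// reduceScanByKey() consumes -- one partial bst_gpair sum and one key per
// block of BLKDIM_L1L3 elements.
inline void sizeScanScratch_example(int size, int* nGradEntries,
                                    int* nKeyEntries) {
  *nGradEntries = scanTempBufferSize(size);  // bst_gpair entries for tmpScans
  *nKeyEntries = scanTempBufferSize(size);   // int entries for tmpKeys
}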
struct AddByKey {
template <typename T>
HOST_DEV_INLINE T operator()(const T& first, const T& second) const {
T result;
if (first.key == second.key) {
result.key = first.key;
result.value = first.value + second.value;
} else {
result.key = second.key;
result.value = second.value;
}
return result;
}
};
/**
* @brief Gradient value getter function
* @param id the index into the vals or instIds array to which to fetch
* @param vals the gradient value buffer
* @param instIds instance index buffer
* @return the expected gradient value
*/
HOST_DEV_INLINE bst_gpair get(int id, const bst_gpair* vals,
const int* instIds) {
id = instIds[id];
return vals[id];
}
template <int BLKDIM_L1L3>
__global__ void cubScanByKeyL1(bst_gpair* scans, const bst_gpair* vals,
const int* instIds, bst_gpair* mScans,
int* mKeys, const node_id_t* keys, int nUniqKeys,
const int* colIds, node_id_t nodeStart,
const int size) {
Pair rootPair = {NONE_KEY, bst_gpair(0.f, 0.f)};
int myKey;
bst_gpair myValue;
typedef hipcub::BlockScan<Pair, BLKDIM_L1L3> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
Pair threadData;
int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x;
if (tid < size) {
myKey = abs2uniqKey(tid, keys, colIds, nodeStart, nUniqKeys);
myValue = get(tid, vals, instIds);
} else {
myKey = NONE_KEY;
myValue = 0.f;
}
threadData.key = myKey;
threadData.value = myValue;
// get previous key, especially needed for the last thread in this block
// in order to pass on the partial scan values.
// this statement MUST appear before the checks below!
// else, the result of this shuffle operation will be undefined
int previousKey = __shfl_up(myKey, 1);
// Collectively compute the block-wide exclusive prefix sum
BlockScan(temp_storage)
.ExclusiveScan(threadData, threadData, rootPair, AddByKey());
if (tid < size) {
scans[tid] = threadData.value;
} else {
return;
}
if (threadIdx.x == BLKDIM_L1L3 - 1) {
threadData.value =
(myKey == previousKey) ? threadData.value : bst_gpair(0.0f, 0.0f);
mKeys[blockIdx.x] = myKey;
mScans[blockIdx.x] = threadData.value + myValue;
}
}
template <int BLKSIZE>
__global__ void cubScanByKeyL2(bst_gpair* mScans, int* mKeys, int mLength) {
typedef hipcub::BlockScan<Pair, BLKSIZE, cub::BLOCK_SCAN_WARP_SCANS> BlockScan;
Pair threadData;
__shared__ typename BlockScan::TempStorage temp_storage;
for (int i = threadIdx.x; i < mLength; i += BLKSIZE - 1) {
threadData.key = mKeys[i];
threadData.value = mScans[i];
BlockScan(temp_storage).InclusiveScan(threadData, threadData, AddByKey());
mScans[i] = threadData.value;
__syncthreads();
}
}
template <int BLKDIM_L1L3>
__global__ void cubScanByKeyL3(bst_gpair* sums, bst_gpair* scans,
const bst_gpair* vals, const int* instIds,
const bst_gpair* mScans, const int* mKeys,
const node_id_t* keys, int nUniqKeys,
const int* colIds, node_id_t nodeStart,
const int size) {
int relId = threadIdx.x;
int tid = (blockIdx.x * BLKDIM_L1L3) + relId;
// to avoid the following warning from nvcc:
// __shared__ memory variable with non-empty constructor or destructor
// (potential race between threads)
__shared__ char gradBuff[sizeof(bst_gpair)];
__shared__ int s_mKeys;
bst_gpair* s_mScans = reinterpret_cast<bst_gpair*>(gradBuff);
if (tid >= size) return;
// cache block-wide partial scan info
if (relId == 0) {
s_mKeys = (blockIdx.x > 0) ? mKeys[blockIdx.x - 1] : NONE_KEY;
s_mScans[0] = (blockIdx.x > 0) ? mScans[blockIdx.x - 1] : bst_gpair();
}
int myKey = abs2uniqKey(tid, keys, colIds, nodeStart, nUniqKeys);
int previousKey =
tid == 0 ? NONE_KEY
: abs2uniqKey(tid - 1, keys, colIds, nodeStart, nUniqKeys);
bst_gpair myValue = scans[tid];
__syncthreads();
if (blockIdx.x > 0 && s_mKeys == previousKey) {
myValue += s_mScans[0];
}
if (tid == size - 1) {
sums[previousKey] = myValue + get(tid, vals, instIds);
}
if ((previousKey != myKey) && (previousKey >= 0)) {
sums[previousKey] = myValue;
myValue = bst_gpair(0.0f, 0.0f);
}
scans[tid] = myValue;
}
/**
* @brief Performs fused reduce and scan by key functionality. It is assumed
* that
* the keys occur contiguously!
* @param sums the output gradient reductions for each element performed
* key-wise
* @param scans the output gradient scans for each element performed key-wise
* @param vals the gradients evaluated for each observation.
* @param instIds instance ids for each element
* @param keys keys to be used to segment the reductions. They need not occur
* contiguously in contrast to scan_by_key. Currently, we need one key per
* value in the 'vals' array.
* @param size number of elements in the 'vals' array
* @param nUniqKeys max number of uniq keys found per column
* @param nCols number of columns
* @param tmpScans temporary scan buffer needed for cub-pyramid algo
* @param tmpKeys temporary key buffer needed for cub-pyramid algo
* @param colIds column indices for each element in the array
* @param nodeStart index of the leftmost node in the current level
*/
template <int BLKDIM_L1L3 = 256, int BLKDIM_L2 = 512>
void reduceScanByKey(bst_gpair* sums, bst_gpair* scans, const bst_gpair* vals,
const int* instIds, const node_id_t* keys, int size,
int nUniqKeys, int nCols, bst_gpair* tmpScans,
int* tmpKeys, const int* colIds, node_id_t nodeStart) {
int nBlks = dh::div_round_up(size, BLKDIM_L1L3);
hipMemset(sums, 0, nUniqKeys * nCols * sizeof(bst_gpair));
hipLaunchKernelGGL(( cubScanByKeyL1<BLKDIM_L1L3>)
, dim3(nBlks), dim3(BLKDIM_L1L3), 0, 0, scans, vals, instIds, tmpScans, tmpKeys, keys,
nUniqKeys, colIds, nodeStart, size);
hipLaunchKernelGGL(( cubScanByKeyL2<BLKDIM_L2>), dim3(1), dim3(BLKDIM_L2), 0, 0, tmpScans, tmpKeys, nBlks);
hipLaunchKernelGGL(( cubScanByKeyL3<BLKDIM_L1L3>)
, dim3(nBlks), dim3(BLKDIM_L1L3), 0, 0, sums, scans, vals, instIds, tmpScans, tmpKeys,
keys, nUniqKeys, colIds, nodeStart, size);
}
/**
* @struct ExactSplitCandidate
* @brief Abstraction of a possible split in the decision tree
*/
struct ExactSplitCandidate {
/** the optimal gain score for this node */
float score;
/** index where to split in the DMatrix */
int index;
HOST_DEV_INLINE ExactSplitCandidate() : score(-FLT_MAX), index(INT_MAX) {}
/**
* @brief Whether the split info is valid to be used to create a new child
* @param minSplitLoss minimum score above which decision to split is made
* @return true if splittable, else false
*/
HOST_DEV_INLINE bool isSplittable(float minSplitLoss) const {
return ((score >= minSplitLoss) && (index != INT_MAX));
}
};
/**
* @enum ArgMaxByKeyAlgo best_split_evaluation.cuh
* @brief Help decide which algorithm to use for multi-argmax operation
*/
enum ArgMaxByKeyAlgo {
/** simplest, use gmem-atomics for all updates */
ABK_GMEM = 0,
/** use smem-atomics for updates (when number of keys are less) */
ABK_SMEM
};
/** max depth until which to use shared mem based atomics for argmax */
static const int MAX_ABK_LEVELS = 3;
HOST_DEV_INLINE ExactSplitCandidate maxSplit(ExactSplitCandidate a,
ExactSplitCandidate b) {
ExactSplitCandidate out;
if (a.score < b.score) {
out.score = b.score;
out.index = b.index;
} else if (a.score == b.score) {
out.score = a.score;
out.index = (a.index < b.index) ? a.index : b.index;
} else {
out.score = a.score;
out.index = a.index;
}
return out;
}
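// Hedged worked example: when a.score == b.score, maxSplit keeps the smaller
// index, so the concurrent atomicArgMax updates below converge
// deterministically on the leftmost best split regardless of thread ordering.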
DEV_INLINE void atomicArgMax(ExactSplitCandidate* address,
ExactSplitCandidate val) {
unsigned long long* intAddress = (unsigned long long*)address; // NOLINT
unsigned long long old = *intAddress; // NOLINT
unsigned long long assumed; // NOLINT
do {
assumed = old;
ExactSplitCandidate res =
maxSplit(val, *reinterpret_cast<ExactSplitCandidate*>(&assumed));
old = atomicCAS(intAddress, assumed, *reinterpret_cast<uint64_t*>(&res));
} while (assumed != old);
}
DEV_INLINE void argMaxWithAtomics(
int id, ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans,
const bst_gpair* gradSums, const float* vals, const int* colIds,
const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys,
node_id_t nodeStart, int len, const GPUTrainingParam& param) {
int nodeId = nodeAssigns[id];
// @todo: this is really a bad check! but will be fixed when we move
// to key-based reduction
if ((id == 0) ||
!((nodeId == nodeAssigns[id - 1]) && (colIds[id] == colIds[id - 1]) &&
(vals[id] == vals[id - 1]))) {
if (nodeId != UNUSED_NODE) {
int sumId = abs2uniqKey(id, nodeAssigns, colIds, nodeStart, nUniqKeys);
bst_gpair colSum = gradSums[sumId];
int uid = nodeId - nodeStart;
DeviceDenseNode n = nodes[nodeId];
bst_gpair parentSum = n.sum_gradients;
float parentGain = n.root_gain;
bool tmp;
ExactSplitCandidate s;
bst_gpair missing = parentSum - colSum;
s.score = loss_chg_missing(gradScans[id], missing, parentSum, parentGain,
param, tmp);
s.index = id;
atomicArgMax(nodeSplits + uid, s);
} // end if nodeId != UNUSED_NODE
} // end if id == 0 ...
}
__global__ void atomicArgMaxByKeyGmem(
ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans,
const bst_gpair* gradSums, const float* vals, const int* colIds,
const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys,
node_id_t nodeStart, int len, const TrainParam param) {
int id = threadIdx.x + (blockIdx.x * blockDim.x);
const int stride = blockDim.x * gridDim.x;
for (; id < len; id += stride) {
argMaxWithAtomics(id, nodeSplits, gradScans, gradSums, vals, colIds,
nodeAssigns, nodes, nUniqKeys, nodeStart, len,
GPUTrainingParam(param));
}
}
__global__ void atomicArgMaxByKeySmem(
ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans,
const bst_gpair* gradSums, const float* vals, const int* colIds,
const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys,
node_id_t nodeStart, int len, const TrainParam param) {
extern __shared__ char sArr[];
ExactSplitCandidate* sNodeSplits =
reinterpret_cast<ExactSplitCandidate*>(sArr);
int tid = threadIdx.x;
ExactSplitCandidate defVal;
#pragma unroll 1
for (int i = tid; i < nUniqKeys; i += blockDim.x) {
sNodeSplits[i] = defVal;
}
__syncthreads();
int id = tid + (blockIdx.x * blockDim.x);
const int stride = blockDim.x * gridDim.x;
for (; id < len; id += stride) {
argMaxWithAtomics(id, sNodeSplits, gradScans, gradSums, vals, colIds,
nodeAssigns, nodes, nUniqKeys, nodeStart, len, param);
}
__syncthreads();
for (int i = tid; i < nUniqKeys; i += blockDim.x) {
ExactSplitCandidate s = sNodeSplits[i];
atomicArgMax(nodeSplits + i, s);
}
}
/**
* @brief Performs argmax_by_key functionality but for cases when keys need not
* occur contiguously
* @param nodeSplits will contain information on best split for each node
* @param gradScans exclusive sum on sorted segments for each col
* @param gradSums gradient sum for each column in DMatrix based on to node-ids
* @param vals feature values
* @param colIds column index for each element in the feature values array
* @param nodeAssigns node-id assignments to each element in DMatrix
* @param nodes pointer to all nodes for this tree in BFS order
* @param nUniqKeys number of unique node-ids in this level
* @param nodeStart start index of the node-ids in this level
* @param len number of elements
* @param param training parameters
* @param algo which algorithm to use for argmax_by_key
*/
template <int BLKDIM = 256, int ITEMS_PER_THREAD = 4>
void argMaxByKey(ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans,
const bst_gpair* gradSums, const float* vals,
const int* colIds, const node_id_t* nodeAssigns,
const DeviceDenseNode* nodes, int nUniqKeys,
node_id_t nodeStart, int len, const TrainParam param,
ArgMaxByKeyAlgo algo) {
dh::fillConst<ExactSplitCandidate, BLKDIM, ITEMS_PER_THREAD>(
dh::get_device_idx(param.gpu_id), nodeSplits, nUniqKeys,
ExactSplitCandidate());
int nBlks = dh::div_round_up(len, ITEMS_PER_THREAD * BLKDIM);
switch (algo) {
case ABK_GMEM:
hipLaunchKernelGGL(( atomicArgMaxByKeyGmem), dim3(nBlks), dim3(BLKDIM), 0, 0,
nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes,
nUniqKeys, nodeStart, len, param);
break;
case ABK_SMEM:
hipLaunchKernelGGL(( atomicArgMaxByKeySmem), dim3(nBlks), dim3(BLKDIM),
sizeof(ExactSplitCandidate) * nUniqKeys, 0,
nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes,
nUniqKeys, nodeStart, len, param);
break;
default:
throw std::runtime_error("argMaxByKey: Bad algo passed!");
}
}
__global__ void assignColIds(int* colIds, const int* colOffsets) {
int myId = blockIdx.x;
int start = colOffsets[myId];
int end = colOffsets[myId + 1];
for (int id = start + threadIdx.x; id < end; id += blockDim.x) {
colIds[id] = myId;
}
}
__global__ void fillDefaultNodeIds(node_id_t* nodeIdsPerInst,
const DeviceDenseNode* nodes, int nRows) {
int id = threadIdx.x + (blockIdx.x * blockDim.x);
if (id >= nRows) {
return;
}
// if this element belongs to none of the currently active node-id's
node_id_t nId = nodeIdsPerInst[id];
if (nId == UNUSED_NODE) {
return;
}
const DeviceDenseNode n = nodes[nId];
node_id_t result;
if (n.IsLeaf() || n.IsUnused()) {
result = UNUSED_NODE;
} else if (n.dir == LeftDir) {
result = (2 * n.idx) + 1;
} else {
result = (2 * n.idx) + 2;
}
nodeIdsPerInst[id] = result;
}
__global__ void assignNodeIds(node_id_t* nodeIdsPerInst, int* nodeLocations,
const node_id_t* nodeIds, const int* instId,
const DeviceDenseNode* nodes,
const int* colOffsets, const float* vals,
int nVals, int nCols) {
int id = threadIdx.x + (blockIdx.x * blockDim.x);
const int stride = blockDim.x * gridDim.x;
for (; id < nVals; id += stride) {
// fusing generation of indices for node locations
nodeLocations[id] = id;
// using nodeIds here since the previous kernel would have updated
// the nodeIdsPerInst with all default assignments
int nId = nodeIds[id];
// if this element belongs to none of the currently active node-id's
if (nId != UNUSED_NODE) {
const DeviceDenseNode n = nodes[nId];
int colId = n.fidx;
// printf("nid=%d colId=%d id=%d\n", nId, colId, id);
int start = colOffsets[colId];
int end = colOffsets[colId + 1];
      // @todo: too many wasted threads!!
if ((id >= start) && (id < end) && !(n.IsLeaf() || n.IsUnused())) {
node_id_t result = (2 * n.idx) + 1 + (vals[id] >= n.fvalue);
nodeIdsPerInst[instId[id]] = result;
}
}
}
}
__global__ void markLeavesKernel(DeviceDenseNode* nodes, int len) {
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if ((id < len) && !nodes[id].IsUnused()) {
int lid = (id << 1) + 1;
int rid = (id << 1) + 2;
if ((lid >= len) || (rid >= len)) {
nodes[id].root_gain = -FLT_MAX; // bottom-most nodes
} else if (nodes[lid].IsUnused() && nodes[rid].IsUnused()) {
nodes[id].root_gain = -FLT_MAX; // unused child nodes
}
}
}
class GPUMaker : public TreeUpdater {
protected:
TrainParam param;
/** whether we have initialized memory already (so as not to repeat!) */
bool allocated;
/** feature values stored in column-major compressed format */
dh::dvec2<float> vals;
dh::dvec<float> vals_cached;
  /** corresponding instance id's of these feature values */
dh::dvec2<int> instIds;
dh::dvec<int> instIds_cached;
/** column offsets for these feature values */
dh::dvec<int> colOffsets;
dh::dvec<bst_gpair> gradsInst;
dh::dvec2<node_id_t> nodeAssigns;
dh::dvec2<int> nodeLocations;
dh::dvec<DeviceDenseNode> nodes;
dh::dvec<node_id_t> nodeAssignsPerInst;
dh::dvec<bst_gpair> gradSums;
dh::dvec<bst_gpair> gradScans;
dh::dvec<ExactSplitCandidate> nodeSplits;
int nVals;
int nRows;
int nCols;
int maxNodes;
int maxLeaves;
dh::CubMemory tmp_mem;
dh::dvec<bst_gpair> tmpScanGradBuff;
dh::dvec<int> tmpScanKeyBuff;
dh::dvec<int> colIds;
dh::bulk_allocator<dh::memory_type::DEVICE> ba;
public:
GPUMaker() : allocated(false) {}
~GPUMaker() {}
void Init(
const std::vector<std::pair<std::string, std::string>>& args) override {
param.InitAllowUnknown(args);
maxNodes = (1 << (param.max_depth + 1)) - 1;
maxLeaves = 1 << param.max_depth;
}
void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
GradStats::CheckInfo(dmat->info());
// rescale learning rate according to size of trees
float lr = param.learning_rate;
param.learning_rate = lr / trees.size();
try {
// build tree
for (size_t i = 0; i < trees.size(); ++i) {
UpdateTree(gpair, dmat, trees[i]);
}
} catch (const std::exception& e) {
LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl;
}
param.learning_rate = lr;
}
/// @note: Update must only be called after Init!
void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* dmat,
RegTree* hTree) {
if (!allocated) {
setupOneTimeData(dmat);
}
for (int i = 0; i < param.max_depth; ++i) {
if (i == 0) {
// make sure to start on a fresh tree with sorted values!
vals.current_dvec() = vals_cached;
instIds.current_dvec() = instIds_cached;
transferGrads(gpair);
}
int nNodes = 1 << i;
node_id_t nodeStart = nNodes - 1;
initNodeData(i, nodeStart, nNodes);
findSplit(i, nodeStart, nNodes);
}
// mark all the used nodes with unused children as leaf nodes
markLeaves();
dense2sparse_tree(hTree, nodes, param);
}
void split2node(int nNodes, node_id_t nodeStart) {
auto d_nodes = nodes.data();
auto d_gradScans = gradScans.data();
auto d_gradSums = gradSums.data();
auto d_nodeAssigns = nodeAssigns.current();
auto d_colIds = colIds.data();
auto d_vals = vals.current();
auto d_nodeSplits = nodeSplits.data();
int nUniqKeys = nNodes;
float min_split_loss = param.min_split_loss;
auto gpu_param = GPUTrainingParam(param);
dh::launch_n(param.gpu_id, nNodes, [=] __device__(int uid) {
int absNodeId = uid + nodeStart;
ExactSplitCandidate s = d_nodeSplits[uid];
if (s.isSplittable(min_split_loss)) {
int idx = s.index;
int nodeInstId =
abs2uniqKey(idx, d_nodeAssigns, d_colIds, nodeStart, nUniqKeys);
bool missingLeft = true;
const DeviceDenseNode& n = d_nodes[absNodeId];
bst_gpair gradScan = d_gradScans[idx];
bst_gpair gradSum = d_gradSums[nodeInstId];
float thresh = d_vals[idx];
int colId = d_colIds[idx];
// get the default direction for the current node
bst_gpair missing = n.sum_gradients - gradSum;
loss_chg_missing(gradScan, missing, n.sum_gradients, n.root_gain,
gpu_param, missingLeft);
// get the score/weight/id/gradSum for left and right child nodes
bst_gpair lGradSum = missingLeft ? gradScan + missing : gradScan;
bst_gpair rGradSum = n.sum_gradients - lGradSum;
// Create children
d_nodes[left_child_nidx(absNodeId)] =
DeviceDenseNode(lGradSum, left_child_nidx(absNodeId), gpu_param);
d_nodes[right_child_nidx(absNodeId)] =
DeviceDenseNode(rGradSum, right_child_nidx(absNodeId), gpu_param);
// Set split for parent
d_nodes[absNodeId].SetSplit(thresh, colId,
missingLeft ? LeftDir : RightDir);
} else {
// cannot be split further, so this node is a leaf!
d_nodes[absNodeId].root_gain = -FLT_MAX;
}
});
}
void findSplit(int level, node_id_t nodeStart, int nNodes) {
reduceScanByKey(gradSums.data(), gradScans.data(), gradsInst.data(),
instIds.current(), nodeAssigns.current(), nVals, nNodes,
nCols, tmpScanGradBuff.data(), tmpScanKeyBuff.data(),
colIds.data(), nodeStart);
argMaxByKey(nodeSplits.data(), gradScans.data(), gradSums.data(),
vals.current(), colIds.data(), nodeAssigns.current(),
nodes.data(), nNodes, nodeStart, nVals, param,
level <= MAX_ABK_LEVELS ? ABK_SMEM : ABK_GMEM);
split2node(nNodes, nodeStart);
}
void allocateAllData(int offsetSize) {
int tmpBuffSize = scanTempBufferSize(nVals);
ba.allocate(dh::get_device_idx(param.gpu_id), param.silent, &vals, nVals,
&vals_cached, nVals, &instIds, nVals, &instIds_cached, nVals,
&colOffsets, offsetSize, &gradsInst, nRows, &nodeAssigns, nVals,
&nodeLocations, nVals, &nodes, maxNodes, &nodeAssignsPerInst,
nRows, &gradSums, maxLeaves * nCols, &gradScans, nVals,
&nodeSplits, maxLeaves, &tmpScanGradBuff, tmpBuffSize,
&tmpScanKeyBuff, tmpBuffSize, &colIds, nVals);
}
void setupOneTimeData(DMatrix* dmat) {
size_t free_memory = dh::available_memory(dh::get_device_idx(param.gpu_id));
if (!dmat->SingleColBlock()) {
throw std::runtime_error("exact::GPUBuilder - must have 1 column block");
}
std::vector<float> fval;
std::vector<int> fId, offset;
convertToCsc(dmat, &fval, &fId, &offset);
allocateAllData(static_cast<int>(offset.size()));
transferAndSortData(fval, fId, offset);
allocated = true;
}
void convertToCsc(DMatrix* dmat, std::vector<float>* fval,
std::vector<int>* fId, std::vector<int>* offset) {
MetaInfo info = dmat->info();
nRows = info.num_row;
nCols = info.num_col;
offset->reserve(nCols + 1);
offset->push_back(0);
fval->reserve(nCols * nRows);
fId->reserve(nCols * nRows);
// if the DMatrix does not yet provide column access,
// make sure to enable it before copying the data!
if (!dmat->HaveColAccess()) {
const std::vector<bool> enable(nCols, true);
dmat->InitColAccess(enable, 1, nRows);
}
dmlc::DataIter<ColBatch>* iter = dmat->ColIterator();
iter->BeforeFirst();
while (iter->Next()) {
const ColBatch& batch = iter->Value();
for (int i = 0; i < batch.size; i++) {
const ColBatch::Inst& col = batch[i];
for (const ColBatch::Entry* it = col.data; it != col.data + col.length;
it++) {
int inst_id = static_cast<int>(it->index);
fval->push_back(it->fvalue);
fId->push_back(inst_id);
}
offset->push_back(fval->size());
}
}
nVals = fval->size();
}
void transferAndSortData(const std::vector<float>& fval,
const std::vector<int>& fId,
const std::vector<int>& offset) {
vals.current_dvec() = fval;
instIds.current_dvec() = fId;
colOffsets = offset;
dh::segmentedSort<float, int>(&tmp_mem, &vals, &instIds, nVals, nCols,
colOffsets);
vals_cached = vals.current_dvec();
instIds_cached = instIds.current_dvec();
hipLaunchKernelGGL(( assignColIds), dim3(nCols), dim3(512), 0, 0, colIds.data(), colOffsets.data());
}
void transferGrads(const std::vector<bst_gpair>& gpair) {
// HACK: copy gradients straight from the host vector into device memory
dh::safe_cuda(hipMemcpy(gradsInst.data(), &(gpair[0]),
sizeof(bst_gpair) * nRows,
hipMemcpyHostToDevice));
// evaluate the full-grad reduction for the root node
dh::sumReduction<bst_gpair>(tmp_mem, gradsInst, gradSums, nRows);
}
void initNodeData(int level, node_id_t nodeStart, int nNodes) {
// all instances belong to root node at the beginning!
if (level == 0) {
nodes.fill(DeviceDenseNode());
nodeAssigns.current_dvec().fill(0);
nodeAssignsPerInst.fill(0);
// for root node, just update the gradient/score/weight/id info
// before splitting it! Currently all data is on GPU, hence this
// stupid little kernel
auto d_nodes = nodes.data();
auto d_sums = gradSums.data();
auto gpu_params = GPUTrainingParam(param);
dh::launch_n(param.gpu_id, 1, [=] __device__(int idx) {
d_nodes[0] = DeviceDenseNode(d_sums[0], 0, gpu_params);
});
} else {
const int BlkDim = 256;
const int ItemsPerThread = 4;
// assign default node ids first
int nBlks = dh::div_round_up(nRows, BlkDim);
hipLaunchKernelGGL(( fillDefaultNodeIds), dim3(nBlks), dim3(BlkDim), 0, 0, nodeAssignsPerInst.data(),
nodes.data(), nRows);
// evaluate the correct child indices of non-missing values next
nBlks = dh::div_round_up(nVals, BlkDim * ItemsPerThread);
hipLaunchKernelGGL(( assignNodeIds), dim3(nBlks), dim3(BlkDim), 0, 0,
nodeAssignsPerInst.data(), nodeLocations.current(),
nodeAssigns.current(), instIds.current(), nodes.data(),
colOffsets.data(), vals.current(), nVals, nCols);
// gather the node assignments across all other columns too
dh::gather(dh::get_device_idx(param.gpu_id), nodeAssigns.current(),
nodeAssignsPerInst.data(), instIds.current(), nVals);
sortKeys(level);
}
}
void sortKeys(int level) {
// segmented-sort the arrays based on node-id's
// but we don't need more than level+1 bits for sorting!
segmentedSort(&tmp_mem, &nodeAssigns, &nodeLocations, nVals, nCols,
colOffsets, 0, level + 1);
dh::gather<float, int>(dh::get_device_idx(param.gpu_id), vals.other(),
vals.current(), instIds.other(), instIds.current(),
nodeLocations.current(), nVals);
vals.buff().selector ^= 1;
instIds.buff().selector ^= 1;
}
void markLeaves() {
const int BlkDim = 128;
int nBlks = dh::div_round_up(maxNodes, BlkDim);
hipLaunchKernelGGL(( markLeavesKernel), dim3(nBlks), dim3(BlkDim), 0, 0, nodes.data(), maxNodes);
}
};
XGBOOST_REGISTER_TREE_UPDATER(GPUMaker, "grow_gpu")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUMaker(); });
} // namespace tree
} // namespace xgboost
| d6b63691eda7aa54332392ed2c4c67edb8a57145.cu | /*!
* Copyright 2017 XGBoost contributors
*/
#include <xgboost/tree_updater.h>
#include <utility>
#include <vector>
#include "../../../src/tree/param.h"
#include "updater_gpu_common.cuh"
namespace xgboost {
namespace tree {
DMLC_REGISTRY_FILE_TAG(updater_gpu);
/**
* @brief Absolute BFS order IDs to col-wise unique IDs based on user input
* @param tid the index of the element that this thread should access
* @param abs the array of absolute IDs
* @param colIds the array of column IDs for each element
* @param nodeStart the start of the node ID at this level
* @param nKeys number of nodes at this level.
* @return the uniq key
*/
static HOST_DEV_INLINE node_id_t abs2uniqKey(int tid, const node_id_t* abs,
const int* colIds, node_id_t nodeStart,
int nKeys) {
int a = abs[tid];
if (a == UNUSED_NODE) return a;
return ((a - nodeStart) + (colIds[tid] * nKeys));
}
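// Illustrative example (hypothetical values): with nodeStart = 3 and nKeys = 4
// (nodes 3..6 active at this level), an element in column 2 assigned to node 5
// maps to key (5 - 3) + (2 * 4) = 10, so each (column, node) pair gets its own slot.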
/**
* @struct Pair
* @brief Pair used for key basd scan operations on bst_gpair
*/
struct Pair {
int key;
bst_gpair value;
};
/** define a key that's not used at all in the entire boosting process */
static const int NONE_KEY = -100;
/**
* @brief Allocate temporary buffers needed for scan operations
* @param tmpScans gradient buffer
* @param tmpKeys keys buffer
* @param size number of elements that will be scanned
*/
template <int BLKDIM_L1L3 = 256>
int scanTempBufferSize(int size) {
int nBlks = dh::div_round_up(size, BLKDIM_L1L3);
return nBlks;
}
struct AddByKey {
template <typename T>
HOST_DEV_INLINE T operator()(const T& first, const T& second) const {
T result;
if (first.key == second.key) {
result.key = first.key;
result.value = first.value + second.value;
} else {
result.key = second.key;
result.value = second.value;
}
return result;
}
};
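// AddByKey behaves like a segmented-sum operator: scanning the (key, value)
// pairs (0,1),(0,2),(1,5),(1,3) inclusively yields (0,1),(0,3),(1,5),(1,8),
// i.e. the running sum resets whenever the key changes (values illustrative).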
/**
* @brief Gradient value getter function
* @param id the index into the vals or instIds array to which to fetch
* @param vals the gradient value buffer
* @param instIds instance index buffer
* @return the expected gradient value
*/
HOST_DEV_INLINE bst_gpair get(int id, const bst_gpair* vals,
const int* instIds) {
id = instIds[id];
return vals[id];
}
template <int BLKDIM_L1L3>
__global__ void cubScanByKeyL1(bst_gpair* scans, const bst_gpair* vals,
const int* instIds, bst_gpair* mScans,
int* mKeys, const node_id_t* keys, int nUniqKeys,
const int* colIds, node_id_t nodeStart,
const int size) {
Pair rootPair = {NONE_KEY, bst_gpair(0.f, 0.f)};
int myKey;
bst_gpair myValue;
typedef cub::BlockScan<Pair, BLKDIM_L1L3> BlockScan;
__shared__ typename BlockScan::TempStorage temp_storage;
Pair threadData;
int tid = blockIdx.x * BLKDIM_L1L3 + threadIdx.x;
if (tid < size) {
myKey = abs2uniqKey(tid, keys, colIds, nodeStart, nUniqKeys);
myValue = get(tid, vals, instIds);
} else {
myKey = NONE_KEY;
myValue = 0.f;
}
threadData.key = myKey;
threadData.value = myValue;
// get previous key, especially needed for the last thread in this block
// in order to pass on the partial scan values.
// this statement MUST appear before the checks below!
// else, the result of this shuffle operation will be undefined
int previousKey = __shfl_up(myKey, 1);
// Collectively compute the block-wide exclusive prefix sum
BlockScan(temp_storage)
.ExclusiveScan(threadData, threadData, rootPair, AddByKey());
if (tid < size) {
scans[tid] = threadData.value;
} else {
return;
}
if (threadIdx.x == BLKDIM_L1L3 - 1) {
threadData.value =
(myKey == previousKey) ? threadData.value : bst_gpair(0.0f, 0.0f);
mKeys[blockIdx.x] = myKey;
mScans[blockIdx.x] = threadData.value + myValue;
}
}
template <int BLKSIZE>
__global__ void cubScanByKeyL2(bst_gpair* mScans, int* mKeys, int mLength) {
typedef cub::BlockScan<Pair, BLKSIZE, cub::BLOCK_SCAN_WARP_SCANS> BlockScan;
Pair threadData;
__shared__ typename BlockScan::TempStorage temp_storage;
for (int i = threadIdx.x; i < mLength; i += BLKSIZE - 1) {
threadData.key = mKeys[i];
threadData.value = mScans[i];
BlockScan(temp_storage).InclusiveScan(threadData, threadData, AddByKey());
mScans[i] = threadData.value;
__syncthreads();
}
}
template <int BLKDIM_L1L3>
__global__ void cubScanByKeyL3(bst_gpair* sums, bst_gpair* scans,
const bst_gpair* vals, const int* instIds,
const bst_gpair* mScans, const int* mKeys,
const node_id_t* keys, int nUniqKeys,
const int* colIds, node_id_t nodeStart,
const int size) {
int relId = threadIdx.x;
int tid = (blockIdx.x * BLKDIM_L1L3) + relId;
// to avoid the following warning from nvcc:
// __shared__ memory variable with non-empty constructor or destructor
// (potential race between threads)
__shared__ char gradBuff[sizeof(bst_gpair)];
__shared__ int s_mKeys;
bst_gpair* s_mScans = reinterpret_cast<bst_gpair*>(gradBuff);
if (tid >= size) return;
// cache block-wide partial scan info
if (relId == 0) {
s_mKeys = (blockIdx.x > 0) ? mKeys[blockIdx.x - 1] : NONE_KEY;
s_mScans[0] = (blockIdx.x > 0) ? mScans[blockIdx.x - 1] : bst_gpair();
}
int myKey = abs2uniqKey(tid, keys, colIds, nodeStart, nUniqKeys);
int previousKey =
tid == 0 ? NONE_KEY
: abs2uniqKey(tid - 1, keys, colIds, nodeStart, nUniqKeys);
bst_gpair myValue = scans[tid];
__syncthreads();
if (blockIdx.x > 0 && s_mKeys == previousKey) {
myValue += s_mScans[0];
}
if (tid == size - 1) {
sums[previousKey] = myValue + get(tid, vals, instIds);
}
if ((previousKey != myKey) && (previousKey >= 0)) {
sums[previousKey] = myValue;
myValue = bst_gpair(0.0f, 0.0f);
}
scans[tid] = myValue;
}
/**
* @brief Performs fused reduce and scan by key functionality. It is assumed
* that
* the keys occur contiguously!
* @param sums the output gradient reductions for each element performed
* key-wise
* @param scans the output gradient scans for each element performed key-wise
* @param vals the gradients evaluated for each observation.
* @param instIds instance ids for each element
* @param keys keys to be used to segment the reductions. They need not occur
* contiguously in contrast to scan_by_key. Currently, we need one key per
* value in the 'vals' array.
* @param size number of elements in the 'vals' array
* @param nUniqKeys max number of uniq keys found per column
* @param nCols number of columns
* @param tmpScans temporary scan buffer needed for cub-pyramid algo
* @param tmpKeys temporary key buffer needed for cub-pyramid algo
* @param colIds column indices for each element in the array
* @param nodeStart index of the leftmost node in the current level
*/
template <int BLKDIM_L1L3 = 256, int BLKDIM_L2 = 512>
void reduceScanByKey(bst_gpair* sums, bst_gpair* scans, const bst_gpair* vals,
const int* instIds, const node_id_t* keys, int size,
int nUniqKeys, int nCols, bst_gpair* tmpScans,
int* tmpKeys, const int* colIds, node_id_t nodeStart) {
int nBlks = dh::div_round_up(size, BLKDIM_L1L3);
cudaMemset(sums, 0, nUniqKeys * nCols * sizeof(bst_gpair));
cubScanByKeyL1<BLKDIM_L1L3>
<<<nBlks, BLKDIM_L1L3>>>(scans, vals, instIds, tmpScans, tmpKeys, keys,
nUniqKeys, colIds, nodeStart, size);
cubScanByKeyL2<BLKDIM_L2><<<1, BLKDIM_L2>>>(tmpScans, tmpKeys, nBlks);
cubScanByKeyL3<BLKDIM_L1L3>
<<<nBlks, BLKDIM_L1L3>>>(sums, scans, vals, instIds, tmpScans, tmpKeys,
keys, nUniqKeys, colIds, nodeStart, size);
}
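// Conceptually (illustrative values): for contiguous keys [0,0,0,1,1] and
// gradients [1,2,3,4,5], 'scans' receives the per-segment exclusive prefix
// sums [0,1,3,0,4] and 'sums' the per-key totals sums[0] = 6, sums[1] = 9.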
/**
* @struct ExactSplitCandidate
* @brief Abstraction of a possible split in the decision tree
*/
struct ExactSplitCandidate {
/** the optimal gain score for this node */
float score;
/** index where to split in the DMatrix */
int index;
HOST_DEV_INLINE ExactSplitCandidate() : score(-FLT_MAX), index(INT_MAX) {}
/**
* @brief Whether the split info is valid to be used to create a new child
* @param minSplitLoss minimum score above which decision to split is made
* @return true if splittable, else false
*/
HOST_DEV_INLINE bool isSplittable(float minSplitLoss) const {
return ((score >= minSplitLoss) && (index != INT_MAX));
}
};
/**
* @enum ArgMaxByKeyAlgo best_split_evaluation.cuh
* @brief Help decide which algorithm to use for multi-argmax operation
*/
enum ArgMaxByKeyAlgo {
/** simplest, use gmem-atomics for all updates */
ABK_GMEM = 0,
/** use smem-atomics for updates (when number of keys are less) */
ABK_SMEM
};
/** max depth until which to use shared mem based atomics for argmax */
static const int MAX_ABK_LEVELS = 3;
HOST_DEV_INLINE ExactSplitCandidate maxSplit(ExactSplitCandidate a,
ExactSplitCandidate b) {
ExactSplitCandidate out;
if (a.score < b.score) {
out.score = b.score;
out.index = b.index;
} else if (a.score == b.score) {
out.score = a.score;
out.index = (a.index < b.index) ? a.index : b.index;
} else {
out.score = a.score;
out.index = a.index;
}
return out;
}
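// Ties on score resolve to the lower index, e.g. maxSplit({0.5, 10}, {0.5, 7})
// yields {0.5, 7}, so the result is deterministic regardless of thread order.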
DEV_INLINE void atomicArgMax(ExactSplitCandidate* address,
ExactSplitCandidate val) {
unsigned long long* intAddress = (unsigned long long*)address; // NOLINT
unsigned long long old = *intAddress; // NOLINT
unsigned long long assumed; // NOLINT
do {
assumed = old;
ExactSplitCandidate res =
maxSplit(val, *reinterpret_cast<ExactSplitCandidate*>(&assumed));
old = atomicCAS(intAddress, assumed, *reinterpret_cast<uint64_t*>(&res));
} while (assumed != old);
}
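// ExactSplitCandidate packs a float score and an int index into 8 bytes, so
// the whole struct can be updated atomically: read the current 64-bit word,
// combine it with the candidate via maxSplit, and retry the CAS until no
// other thread has intervened.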
DEV_INLINE void argMaxWithAtomics(
int id, ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans,
const bst_gpair* gradSums, const float* vals, const int* colIds,
const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys,
node_id_t nodeStart, int len, const GPUTrainingParam& param) {
int nodeId = nodeAssigns[id];
// @todo: this is really a bad check! It will be fixed once we move
// to key-based reduction
if ((id == 0) ||
!((nodeId == nodeAssigns[id - 1]) && (colIds[id] == colIds[id - 1]) &&
(vals[id] == vals[id - 1]))) {
if (nodeId != UNUSED_NODE) {
int sumId = abs2uniqKey(id, nodeAssigns, colIds, nodeStart, nUniqKeys);
bst_gpair colSum = gradSums[sumId];
int uid = nodeId - nodeStart;
DeviceDenseNode n = nodes[nodeId];
bst_gpair parentSum = n.sum_gradients;
float parentGain = n.root_gain;
bool tmp;
ExactSplitCandidate s;
bst_gpair missing = parentSum - colSum;
s.score = loss_chg_missing(gradScans[id], missing, parentSum, parentGain,
param, tmp);
s.index = id;
atomicArgMax(nodeSplits + uid, s);
} // end if nodeId != UNUSED_NODE
} // end if id == 0 ...
}
__global__ void atomicArgMaxByKeyGmem(
ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans,
const bst_gpair* gradSums, const float* vals, const int* colIds,
const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys,
node_id_t nodeStart, int len, const TrainParam param) {
int id = threadIdx.x + (blockIdx.x * blockDim.x);
const int stride = blockDim.x * gridDim.x;
for (; id < len; id += stride) {
argMaxWithAtomics(id, nodeSplits, gradScans, gradSums, vals, colIds,
nodeAssigns, nodes, nUniqKeys, nodeStart, len,
GPUTrainingParam(param));
}
}
__global__ void atomicArgMaxByKeySmem(
ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans,
const bst_gpair* gradSums, const float* vals, const int* colIds,
const node_id_t* nodeAssigns, const DeviceDenseNode* nodes, int nUniqKeys,
node_id_t nodeStart, int len, const TrainParam param) {
extern __shared__ char sArr[];
ExactSplitCandidate* sNodeSplits =
reinterpret_cast<ExactSplitCandidate*>(sArr);
int tid = threadIdx.x;
ExactSplitCandidate defVal;
#pragma unroll 1
for (int i = tid; i < nUniqKeys; i += blockDim.x) {
sNodeSplits[i] = defVal;
}
__syncthreads();
int id = tid + (blockIdx.x * blockDim.x);
const int stride = blockDim.x * gridDim.x;
for (; id < len; id += stride) {
argMaxWithAtomics(id, sNodeSplits, gradScans, gradSums, vals, colIds,
nodeAssigns, nodes, nUniqKeys, nodeStart, len, param);
}
__syncthreads();
for (int i = tid; i < nUniqKeys; i += blockDim.x) {
ExactSplitCandidate s = sNodeSplits[i];
atomicArgMax(nodeSplits + i, s);
}
}
/**
* @brief Performs argmax_by_key functionality but for cases when keys need not
* occur contiguously
* @param nodeSplits will contain information on best split for each node
* @param gradScans exclusive sum on sorted segments for each col
* @param gradSums gradient sum for each column in DMatrix based on to node-ids
* @param vals feature values
* @param colIds column index for each element in the feature values array
* @param nodeAssigns node-id assignments to each element in DMatrix
* @param nodes pointer to all nodes for this tree in BFS order
* @param nUniqKeys number of unique node-ids in this level
* @param nodeStart start index of the node-ids in this level
* @param len number of elements
* @param param training parameters
* @param algo which algorithm to use for argmax_by_key
*/
template <int BLKDIM = 256, int ITEMS_PER_THREAD = 4>
void argMaxByKey(ExactSplitCandidate* nodeSplits, const bst_gpair* gradScans,
const bst_gpair* gradSums, const float* vals,
const int* colIds, const node_id_t* nodeAssigns,
const DeviceDenseNode* nodes, int nUniqKeys,
node_id_t nodeStart, int len, const TrainParam param,
ArgMaxByKeyAlgo algo) {
dh::fillConst<ExactSplitCandidate, BLKDIM, ITEMS_PER_THREAD>(
dh::get_device_idx(param.gpu_id), nodeSplits, nUniqKeys,
ExactSplitCandidate());
int nBlks = dh::div_round_up(len, ITEMS_PER_THREAD * BLKDIM);
switch (algo) {
case ABK_GMEM:
atomicArgMaxByKeyGmem<<<nBlks, BLKDIM>>>(
nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes,
nUniqKeys, nodeStart, len, param);
break;
case ABK_SMEM:
atomicArgMaxByKeySmem<<<nBlks, BLKDIM,
sizeof(ExactSplitCandidate) * nUniqKeys>>>(
nodeSplits, gradScans, gradSums, vals, colIds, nodeAssigns, nodes,
nUniqKeys, nodeStart, len, param);
break;
default:
throw std::runtime_error("argMaxByKey: Bad algo passed!");
}
}
__global__ void assignColIds(int* colIds, const int* colOffsets) {
int myId = blockIdx.x;
int start = colOffsets[myId];
int end = colOffsets[myId + 1];
for (int id = start + threadIdx.x; id < end; id += blockDim.x) {
colIds[id] = myId;
}
}
__global__ void fillDefaultNodeIds(node_id_t* nodeIdsPerInst,
const DeviceDenseNode* nodes, int nRows) {
int id = threadIdx.x + (blockIdx.x * blockDim.x);
if (id >= nRows) {
return;
}
// if this element belongs to none of the currently active node-id's
node_id_t nId = nodeIdsPerInst[id];
if (nId == UNUSED_NODE) {
return;
}
const DeviceDenseNode n = nodes[nId];
node_id_t result;
if (n.IsLeaf() || n.IsUnused()) {
result = UNUSED_NODE;
} else if (n.dir == LeftDir) {
result = (2 * n.idx) + 1;
} else {
result = (2 * n.idx) + 2;
}
nodeIdsPerInst[id] = result;
}
__global__ void assignNodeIds(node_id_t* nodeIdsPerInst, int* nodeLocations,
const node_id_t* nodeIds, const int* instId,
const DeviceDenseNode* nodes,
const int* colOffsets, const float* vals,
int nVals, int nCols) {
int id = threadIdx.x + (blockIdx.x * blockDim.x);
const int stride = blockDim.x * gridDim.x;
for (; id < nVals; id += stride) {
// fusing generation of indices for node locations
nodeLocations[id] = id;
// using nodeIds here since the previous kernel would have updated
// the nodeIdsPerInst with all default assignments
int nId = nodeIds[id];
// if this element belongs to none of the currently active node-id's
if (nId != UNUSED_NODE) {
const DeviceDenseNode n = nodes[nId];
int colId = n.fidx;
// printf("nid=%d colId=%d id=%d\n", nId, colId, id);
int start = colOffsets[colId];
int end = colOffsets[colId + 1];
// @todo: too many wasted threads!!
if ((id >= start) && (id < end) && !(n.IsLeaf() || n.IsUnused())) {
node_id_t result = (2 * n.idx) + 1 + (vals[id] >= n.fvalue);
nodeIdsPerInst[instId[id]] = result;
}
}
}
}
__global__ void markLeavesKernel(DeviceDenseNode* nodes, int len) {
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if ((id < len) && !nodes[id].IsUnused()) {
int lid = (id << 1) + 1;
int rid = (id << 1) + 2;
if ((lid >= len) || (rid >= len)) {
nodes[id].root_gain = -FLT_MAX; // bottom-most nodes
} else if (nodes[lid].IsUnused() && nodes[rid].IsUnused()) {
nodes[id].root_gain = -FLT_MAX; // unused child nodes
}
}
}
class GPUMaker : public TreeUpdater {
protected:
TrainParam param;
/** whether we have initialized memory already (so as not to repeat!) */
bool allocated;
/** feature values stored in column-major compressed format */
dh::dvec2<float> vals;
dh::dvec<float> vals_cached;
/** corresponding instance id's of these feature values */
dh::dvec2<int> instIds;
dh::dvec<int> instIds_cached;
/** column offsets for these feature values */
dh::dvec<int> colOffsets;
dh::dvec<bst_gpair> gradsInst;
dh::dvec2<node_id_t> nodeAssigns;
dh::dvec2<int> nodeLocations;
dh::dvec<DeviceDenseNode> nodes;
dh::dvec<node_id_t> nodeAssignsPerInst;
dh::dvec<bst_gpair> gradSums;
dh::dvec<bst_gpair> gradScans;
dh::dvec<ExactSplitCandidate> nodeSplits;
int nVals;
int nRows;
int nCols;
int maxNodes;
int maxLeaves;
dh::CubMemory tmp_mem;
dh::dvec<bst_gpair> tmpScanGradBuff;
dh::dvec<int> tmpScanKeyBuff;
dh::dvec<int> colIds;
dh::bulk_allocator<dh::memory_type::DEVICE> ba;
public:
GPUMaker() : allocated(false) {}
~GPUMaker() {}
void Init(
const std::vector<std::pair<std::string, std::string>>& args) override {
param.InitAllowUnknown(args);
maxNodes = (1 << (param.max_depth + 1)) - 1;
maxLeaves = 1 << param.max_depth;
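// e.g. max_depth = 3 gives maxNodes = (1 << 4) - 1 = 15 (complete binary
// tree including the root level) and maxLeaves = 1 << 3 = 8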
}
void Update(const std::vector<bst_gpair>& gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
GradStats::CheckInfo(dmat->info());
// rescale learning rate according to size of trees
float lr = param.learning_rate;
param.learning_rate = lr / trees.size();
try {
// build tree
for (size_t i = 0; i < trees.size(); ++i) {
UpdateTree(gpair, dmat, trees[i]);
}
} catch (const std::exception& e) {
LOG(FATAL) << "GPU plugin exception: " << e.what() << std::endl;
}
param.learning_rate = lr;
}
/// @note: Update must only be called after Init!
void UpdateTree(const std::vector<bst_gpair>& gpair, DMatrix* dmat,
RegTree* hTree) {
if (!allocated) {
setupOneTimeData(dmat);
}
for (int i = 0; i < param.max_depth; ++i) {
if (i == 0) {
// make sure to start on a fresh tree with sorted values!
vals.current_dvec() = vals_cached;
instIds.current_dvec() = instIds_cached;
transferGrads(gpair);
}
int nNodes = 1 << i;
node_id_t nodeStart = nNodes - 1;
initNodeData(i, nodeStart, nNodes);
findSplit(i, nodeStart, nNodes);
}
// mark all the used nodes with unused children as leaf nodes
markLeaves();
dense2sparse_tree(hTree, nodes, param);
}
void split2node(int nNodes, node_id_t nodeStart) {
auto d_nodes = nodes.data();
auto d_gradScans = gradScans.data();
auto d_gradSums = gradSums.data();
auto d_nodeAssigns = nodeAssigns.current();
auto d_colIds = colIds.data();
auto d_vals = vals.current();
auto d_nodeSplits = nodeSplits.data();
int nUniqKeys = nNodes;
float min_split_loss = param.min_split_loss;
auto gpu_param = GPUTrainingParam(param);
dh::launch_n(param.gpu_id, nNodes, [=] __device__(int uid) {
int absNodeId = uid + nodeStart;
ExactSplitCandidate s = d_nodeSplits[uid];
if (s.isSplittable(min_split_loss)) {
int idx = s.index;
int nodeInstId =
abs2uniqKey(idx, d_nodeAssigns, d_colIds, nodeStart, nUniqKeys);
bool missingLeft = true;
const DeviceDenseNode& n = d_nodes[absNodeId];
bst_gpair gradScan = d_gradScans[idx];
bst_gpair gradSum = d_gradSums[nodeInstId];
float thresh = d_vals[idx];
int colId = d_colIds[idx];
// get the default direction for the current node
bst_gpair missing = n.sum_gradients - gradSum;
loss_chg_missing(gradScan, missing, n.sum_gradients, n.root_gain,
gpu_param, missingLeft);
// get the score/weight/id/gradSum for left and right child nodes
bst_gpair lGradSum = missingLeft ? gradScan + missing : gradScan;
bst_gpair rGradSum = n.sum_gradients - lGradSum;
// Create children
d_nodes[left_child_nidx(absNodeId)] =
DeviceDenseNode(lGradSum, left_child_nidx(absNodeId), gpu_param);
d_nodes[right_child_nidx(absNodeId)] =
DeviceDenseNode(rGradSum, right_child_nidx(absNodeId), gpu_param);
// Set split for parent
d_nodes[absNodeId].SetSplit(thresh, colId,
missingLeft ? LeftDir : RightDir);
} else {
// cannot be split further, so this node is a leaf!
d_nodes[absNodeId].root_gain = -FLT_MAX;
}
});
}
void findSplit(int level, node_id_t nodeStart, int nNodes) {
reduceScanByKey(gradSums.data(), gradScans.data(), gradsInst.data(),
instIds.current(), nodeAssigns.current(), nVals, nNodes,
nCols, tmpScanGradBuff.data(), tmpScanKeyBuff.data(),
colIds.data(), nodeStart);
argMaxByKey(nodeSplits.data(), gradScans.data(), gradSums.data(),
vals.current(), colIds.data(), nodeAssigns.current(),
nodes.data(), nNodes, nodeStart, nVals, param,
level <= MAX_ABK_LEVELS ? ABK_SMEM : ABK_GMEM);
split2node(nNodes, nodeStart);
}
void allocateAllData(int offsetSize) {
int tmpBuffSize = scanTempBufferSize(nVals);
ba.allocate(dh::get_device_idx(param.gpu_id), param.silent, &vals, nVals,
&vals_cached, nVals, &instIds, nVals, &instIds_cached, nVals,
&colOffsets, offsetSize, &gradsInst, nRows, &nodeAssigns, nVals,
&nodeLocations, nVals, &nodes, maxNodes, &nodeAssignsPerInst,
nRows, &gradSums, maxLeaves * nCols, &gradScans, nVals,
&nodeSplits, maxLeaves, &tmpScanGradBuff, tmpBuffSize,
&tmpScanKeyBuff, tmpBuffSize, &colIds, nVals);
}
void setupOneTimeData(DMatrix* dmat) {
size_t free_memory = dh::available_memory(dh::get_device_idx(param.gpu_id));
if (!dmat->SingleColBlock()) {
throw std::runtime_error("exact::GPUBuilder - must have 1 column block");
}
std::vector<float> fval;
std::vector<int> fId, offset;
convertToCsc(dmat, &fval, &fId, &offset);
allocateAllData(static_cast<int>(offset.size()));
transferAndSortData(fval, fId, offset);
allocated = true;
}
void convertToCsc(DMatrix* dmat, std::vector<float>* fval,
std::vector<int>* fId, std::vector<int>* offset) {
MetaInfo info = dmat->info();
nRows = info.num_row;
nCols = info.num_col;
offset->reserve(nCols + 1);
offset->push_back(0);
fval->reserve(nCols * nRows);
fId->reserve(nCols * nRows);
// if the DMatrix does not yet provide column access,
// make sure to enable it before copying the data!
if (!dmat->HaveColAccess()) {
const std::vector<bool> enable(nCols, true);
dmat->InitColAccess(enable, 1, nRows);
}
dmlc::DataIter<ColBatch>* iter = dmat->ColIterator();
iter->BeforeFirst();
while (iter->Next()) {
const ColBatch& batch = iter->Value();
for (int i = 0; i < batch.size; i++) {
const ColBatch::Inst& col = batch[i];
for (const ColBatch::Entry* it = col.data; it != col.data + col.length;
it++) {
int inst_id = static_cast<int>(it->index);
fval->push_back(it->fvalue);
fId->push_back(inst_id);
}
offset->push_back(fval->size());
}
}
nVals = fval->size();
}
void transferAndSortData(const std::vector<float>& fval,
const std::vector<int>& fId,
const std::vector<int>& offset) {
vals.current_dvec() = fval;
instIds.current_dvec() = fId;
colOffsets = offset;
dh::segmentedSort<float, int>(&tmp_mem, &vals, &instIds, nVals, nCols,
colOffsets);
vals_cached = vals.current_dvec();
instIds_cached = instIds.current_dvec();
assignColIds<<<nCols, 512>>>(colIds.data(), colOffsets.data());
}
void transferGrads(const std::vector<bst_gpair>& gpair) {
// HACK: copy gradients straight from the host vector into device memory
dh::safe_cuda(cudaMemcpy(gradsInst.data(), &(gpair[0]),
sizeof(bst_gpair) * nRows,
cudaMemcpyHostToDevice));
// evaluate the full-grad reduction for the root node
dh::sumReduction<bst_gpair>(tmp_mem, gradsInst, gradSums, nRows);
}
void initNodeData(int level, node_id_t nodeStart, int nNodes) {
// all instances belong to root node at the beginning!
if (level == 0) {
nodes.fill(DeviceDenseNode());
nodeAssigns.current_dvec().fill(0);
nodeAssignsPerInst.fill(0);
// for root node, just update the gradient/score/weight/id info
// before splitting it! Currently all data is on GPU, hence this
// stupid little kernel
auto d_nodes = nodes.data();
auto d_sums = gradSums.data();
auto gpu_params = GPUTrainingParam(param);
dh::launch_n(param.gpu_id, 1, [=] __device__(int idx) {
d_nodes[0] = DeviceDenseNode(d_sums[0], 0, gpu_params);
});
} else {
const int BlkDim = 256;
const int ItemsPerThread = 4;
// assign default node ids first
int nBlks = dh::div_round_up(nRows, BlkDim);
fillDefaultNodeIds<<<nBlks, BlkDim>>>(nodeAssignsPerInst.data(),
nodes.data(), nRows);
// evaluate the correct child indices of non-missing values next
nBlks = dh::div_round_up(nVals, BlkDim * ItemsPerThread);
assignNodeIds<<<nBlks, BlkDim>>>(
nodeAssignsPerInst.data(), nodeLocations.current(),
nodeAssigns.current(), instIds.current(), nodes.data(),
colOffsets.data(), vals.current(), nVals, nCols);
// gather the node assignments across all other columns too
dh::gather(dh::get_device_idx(param.gpu_id), nodeAssigns.current(),
nodeAssignsPerInst.data(), instIds.current(), nVals);
sortKeys(level);
}
}
void sortKeys(int level) {
// segmented-sort the arrays based on node-id's
// but we don't need more than level+1 bits for sorting!
segmentedSort(&tmp_mem, &nodeAssigns, &nodeLocations, nVals, nCols,
colOffsets, 0, level + 1);
dh::gather<float, int>(dh::get_device_idx(param.gpu_id), vals.other(),
vals.current(), instIds.other(), instIds.current(),
nodeLocations.current(), nVals);
vals.buff().selector ^= 1;
instIds.buff().selector ^= 1;
}
void markLeaves() {
const int BlkDim = 128;
int nBlks = dh::div_round_up(maxNodes, BlkDim);
markLeavesKernel<<<nBlks, BlkDim>>>(nodes.data(), maxNodes);
}
};
XGBOOST_REGISTER_TREE_UPDATER(GPUMaker, "grow_gpu")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUMaker(); });
} // namespace tree
} // namespace xgboost
|
77113a8c28a248ce28a1bf58cfc47158e36ff439.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <io/utilities/block_utils.cuh>
// Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2
// Workaround replaces zero-length patch lists by a dummy zero patch
#define ZERO_PLL_WAR 1
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
#define SCRATCH_BFRSZ (512*4)
static __device__ __constant__ int64_t kORCTimeToUTC = 1420070400; // Seconds from January 1st, 1970 to January 1st, 2015
struct byterle_enc_state_s
{
uint32_t literal_run;
uint32_t repeat_run;
volatile uint32_t rpt_map[(512 / 32) + 1];
};
struct intrle_enc_state_s
{
uint32_t literal_run;
uint32_t delta_run;
uint32_t literal_mode;
uint32_t literal_w;
uint32_t hdr_bytes;
uint32_t pl_bytes;
volatile uint32_t delta_map[(512 / 32) + 1];
volatile union {
uint32_t u32[(512 / 32) * 2];
uint64_t u64[(512 / 32) * 2];
} scratch;
};
struct strdata_enc_state_s
{
uint32_t char_count;
uint32_t lengths_red[(512 / 32)];
const char *str_data[512];
};
struct orcenc_state_s
{
uint32_t cur_row; // Current row in group
uint32_t present_rows; // # of rows in present buffer
uint32_t present_out; // # of rows in present buffer that have been flushed
uint32_t nrows; // # of rows in current batch
uint32_t numvals; // # of non-zero values in current batch (<=nrows)
uint32_t numlengths; // # of non-zero values in DATA2 batch
uint32_t nnz; // Running count of non-null values
EncChunk chunk;
uint32_t strm_pos[CI_NUM_STREAMS];
uint8_t valid_buf[512]; // valid map bits
union {
byterle_enc_state_s byterle;
intrle_enc_state_s intrle;
strdata_enc_state_s strenc;
StripeDictionary dict_stripe;
} u;
union {
uint8_t u8[SCRATCH_BFRSZ]; // general scratch buffer
uint32_t u32[SCRATCH_BFRSZ /4];
} buf;
union {
uint8_t u8[2048];
uint32_t u32[1024];
int32_t i32[1024];
uint64_t u64[1024];
int64_t i64[1024];
} vals;
union {
uint8_t u8[2048];
uint32_t u32[1024];
} lengths;
};
static inline __device__ uint32_t zigzag32(int32_t v) { int32_t s = (v >> 31); return ((v ^ s) * 2) - s; }
static inline __device__ uint64_t zigzag64(int64_t v) { int64_t s = (v < 0) ? 1 : 0; return ((v ^ -s) * 2) + s; }
static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; }
static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; }
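// Illustrative values: zigzag32 maps 0, -1, 1, -2, 2 to 0, 1, 2, 3, 4, so small
// magnitudes become small unsigned codes regardless of sign. CountLeadingBytes32
// counts whole leading zero bytes, e.g. CountLeadingBytes32(0x0000ab12) = 2,
// meaning the value fits in 4 - 2 = 2 bytes.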
/**
* @brief Raw data output
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at streams[cid]+strm_pos[cid])
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] count number of bytes to encode
* @param[in] t thread id
*
**/
template<StreamIndexType cid, uint32_t inmask>
static __device__ void StoreBytes(orcenc_state_s *s, const uint8_t *inbuf, uint32_t inpos, uint32_t count, int t)
{
uint8_t *dst = s->chunk.streams[cid] + s->strm_pos[cid];
while (count > 0)
{
uint32_t n = min(count, 512);
if (t < n)
{
dst[t] = inbuf[(inpos + t) & inmask];
}
dst += n;
inpos += n;
count -= n;
}
__syncthreads();
if (!t)
{
s->strm_pos[cid] = static_cast<uint32_t>(dst - s->chunk.streams[cid]);
}
}
/**
* @brief ByteRLE encoder
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at streams[cid]+strm_pos[cid])
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
*
* @return number of input values encoded
*
**/
template<StreamIndexType cid, uint32_t inmask>
static __device__ uint32_t ByteRLE(orcenc_state_s *s, const uint8_t *inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t)
{
uint8_t *dst = s->chunk.streams[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
while (numvals > 0)
{
uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
uint32_t rpt_map = BALLOT(t + 1 < numvals && v0 == v1), literal_run, repeat_run, maxvals = min(numvals, 512);
if (!(t & 0x1f))
s->u.byterle.rpt_map[t >> 5] = rpt_map;
__syncthreads();
if (t == 0)
{
// Find the start of an identical 3-byte sequence
// TBD: The two loops below could be eliminated with additional ballot+ffs steps on warp 0
literal_run = 0;
repeat_run = 0;
while (literal_run < maxvals)
{
uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1];
uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1);
if (mask)
{
uint32_t literal_run_ofs = __ffs(mask) - 1;
literal_run += literal_run_ofs;
repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1));
if (repeat_run + literal_run_ofs == 32)
{
while (next == ~0)
{
uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1;
next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0;
repeat_run += 32;
}
repeat_run += __ffs(~next) - 1;
}
repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals));
if (repeat_run < 3)
{
literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0;
repeat_run = 0;
}
break;
}
rpt_map = next;
literal_run += 32;
}
if (repeat_run >= 130)
{
// Limit large runs to multiples of 130
repeat_run = (repeat_run >= 3*130) ? 3*130 : (repeat_run >= 2*130) ? 2*130 : 130;
}
else if (literal_run && literal_run + repeat_run == maxvals)
{
repeat_run = 0; // Try again at next iteration
}
s->u.byterle.repeat_run = repeat_run;
s->u.byterle.literal_run = min(literal_run, maxvals);
}
__syncthreads();
literal_run = s->u.byterle.literal_run;
if (!flush && literal_run == numvals)
{
literal_run &= ~0x7f;
if (!literal_run)
break;
}
if (literal_run > 0)
{
uint32_t num_runs = (literal_run + 0x7f) >> 7;
if (t < literal_run)
{
uint32_t run_id = t >> 7;
uint32_t run = min(literal_run - run_id * 128, 128);
if (!(t & 0x7f))
dst[run_id + t] = 0x100 - run;
dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += num_runs + literal_run;
out_cnt += literal_run;
numvals -= literal_run;
inpos += literal_run;
}
repeat_run = s->u.byterle.repeat_run;
if (repeat_run > 0)
{
while (repeat_run >= 130)
{
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = 0x7f;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += 130;
numvals -= 130;
inpos += 130;
repeat_run -= 130;
}
if (!flush && repeat_run == numvals)
{
// Wait for more data in case we can continue the run later
break;
}
if (repeat_run >= 3)
{
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = repeat_run - 3;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += repeat_run;
numvals -= repeat_run;
inpos += repeat_run;
}
}
}
if (!t)
{
s->strm_pos[cid] = static_cast<uint32_t>(dst - s->chunk.streams[cid]);
}
__syncthreads();
return out_cnt;
}
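// Output format sketch (values illustrative): a run of 100 copies of 0x05 is
// emitted as the two bytes {97, 0x05} (control = run - 3), while 5 literal
// bytes are emitted as {0xFB, b0..b4} (control = 0x100 - count); runs longer
// than 130 are split into chunks with control byte 0x7f.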
/**
* @brief Maps the symbol size in bytes to RLEv2 5-bit length code
**/
static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] =
{
0, 7, 15, 23, 27, 28, 29, 30, 31
};
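// Index i is a symbol size in bytes; the entry is the RLEv2 5-bit width code
// whose decoded bit width is i * 8 (e.g. 2 bytes -> code 15 -> 16 bits,
// 4 bytes -> code 27 -> 32 bits); index 0 is a placeholder.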
/**
* @brief Encode a varint value, return the number of bytes written
**/
static inline __device__ uint32_t StoreVarint(uint8_t *dst, uint64_t v)
{
uint32_t bytecnt = 0;
for (;;)
{
uint32_t c = (uint32_t)(v & 0x7f);
v >>= 7u;
if (v == 0)
{
dst[bytecnt++] = c;
break;
}
else
{
dst[bytecnt++] = c + 0x80;
}
}
return bytecnt;
}
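// Example: StoreVarint(dst, 300) emits the two bytes 0xAC, 0x02 and returns 2
// (low 7 bits go first, and the MSB of each byte flags a continuation).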
static inline __device__ void intrle_minmax(int64_t &vmin, int64_t &vmax) { vmin = INT64_MIN; vmax = INT64_MAX; }
//static inline __device__ void intrle_minmax(uint64_t &vmin, uint64_t &vmax) { vmin = UINT64_C(0); vmax = UINT64_MAX; }
static inline __device__ void intrle_minmax(int32_t &vmin, int32_t &vmax) { vmin = INT32_MIN; vmax = INT32_MAX; }
static inline __device__ void intrle_minmax(uint32_t &vmin, uint32_t &vmax) { vmin = UINT32_C(0); vmax = UINT32_MAX; }
template<class T>
static inline __device__ void StoreBytesBigEndian(uint8_t *dst, T v, uint32_t w)
{
for (uint32_t i = 0, b = w * 8; i < w; ++i)
{
b -= 8;
dst[i] = static_cast<uint8_t>(v >> b);
}
}
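// Example: StoreBytesBigEndian(dst, 0x1234, 2) writes dst[0] = 0x12,
// dst[1] = 0x34 (most significant byte first).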
// Combine and store bits for symbol widths less than 8
static inline __device__ void StoreBitsBigEndian(uint8_t *dst, uint32_t v, uint32_t w, int num_vals, int t)
{
if (t <= (num_vals | 0x1f))
{
uint32_t mask;
if (w <= 1)
{
v = (v << 1) | (SHFL_XOR(v, 1) & 0x1);
v = (v << 2) | (SHFL_XOR(v, 2) & 0x3);
v = (v << 4) | (SHFL_XOR(v, 4) & 0xf);
mask = 0x7;
}
else if (w <= 2)
{
v = (v << 2) | (SHFL_XOR(v, 1) & 0x3);
v = (v << 4) | (SHFL_XOR(v, 2) & 0xf);
mask = 0x3;
}
else // if (w <= 4)
{
v = (v << 4) | (SHFL_XOR(v, 1) & 0xf);
mask = 0x1;
}
if (t < num_vals && !(t & mask))
{
dst[(t * w) >> 3] = static_cast<uint8_t>(v);
}
}
}
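// For w = 1/2/4, the shuffles above gather the symbols of 8/4/2 consecutive
// lanes into one byte, MSB-first (lane 0's symbol lands in the top bits), and
// one lane per group writes the packed byte.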
/**
* @brief Integer RLEv2 encoder
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at streams[cid]+strm_pos[cid])
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
*
* @return number of input values encoded
*
**/
template<StreamIndexType cid, class T, bool is_signed, uint32_t inmask>
static __device__ uint32_t IntegerRLE(orcenc_state_s *s, const T *inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t)
{
uint8_t *dst = s->chunk.streams[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
while (numvals > 0)
{
T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0;
uint32_t delta_map = BALLOT(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512), literal_run, delta_run;
if (!(t & 0x1f))
s->u.intrle.delta_map[t >> 5] = delta_map;
__syncthreads();
if (!t)
{
// Find the start of the next delta run (2 consecutive values with the same delta)
literal_run = delta_run = 0;
while (literal_run < maxvals)
{
if (delta_map != 0)
{
uint32_t literal_run_ofs = __ffs(delta_map) - 1;
literal_run += literal_run_ofs;
delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1));
if (literal_run_ofs + delta_run == 32)
{
for (;;)
{
uint32_t delta_idx = (literal_run + delta_run) >> 5;
delta_map = (delta_idx < 512/32) ? s->u.intrle.delta_map[delta_idx] : 0;
if (delta_map != ~0)
break;
delta_run += 32;
}
delta_run += __ffs(~delta_map) - 1;
}
delta_run += 2;
break;
}
literal_run += 32;
delta_map = s->u.intrle.delta_map[(literal_run >> 5)];
}
literal_run = min(literal_run, maxvals);
s->u.intrle.literal_run = literal_run;
s->u.intrle.delta_run = min(delta_run, maxvals - literal_run);
}
__syncthreads();
literal_run = s->u.intrle.literal_run;
// Find minimum and maximum values
if (literal_run > 0)
{
// Find min & max
T vmin, vmax;
uint32_t literal_mode, literal_w;
if (t < literal_run)
{
vmin = vmax = v0;
}
else
{
intrle_minmax(vmax, vmin);
}
vmin = min(vmin, (T)SHFL_XOR(vmin, 1));
vmin = min(vmin, (T)SHFL_XOR(vmin, 2));
vmin = min(vmin, (T)SHFL_XOR(vmin, 4));
vmin = min(vmin, (T)SHFL_XOR(vmin, 8));
vmin = min(vmin, (T)SHFL_XOR(vmin, 16));
vmax = max(vmax, (T)SHFL_XOR(vmax, 1));
vmax = max(vmax, (T)SHFL_XOR(vmax, 2));
vmax = max(vmax, (T)SHFL_XOR(vmax, 4));
vmax = max(vmax, (T)SHFL_XOR(vmax, 8));
vmax = max(vmax, (T)SHFL_XOR(vmax, 16));
if (!(t & 0x1f))
{
s->u.intrle.scratch.u64[(t >> 5) * 2 + 0] = vmin;
s->u.intrle.scratch.u64[(t >> 5) * 2 + 1] = vmax;
}
__syncthreads();
if (t < 32)
{
vmin = (T)s->u.intrle.scratch.u64[(t & 0xf) * 2 + 0];
vmax = (T)s->u.intrle.scratch.u64[(t & 0xf) * 2 + 1];
vmin = min(vmin, (T)SHFL_XOR(vmin, 1));
vmin = min(vmin, (T)SHFL_XOR(vmin, 2));
vmin = min(vmin, (T)SHFL_XOR(vmin, 4));
vmin = min(vmin, (T)SHFL_XOR(vmin, 8));
vmax = max(vmax, (T)SHFL_XOR(vmax, 1));
vmax = max(vmax, (T)SHFL_XOR(vmax, 2));
vmax = max(vmax, (T)SHFL_XOR(vmax, 4));
vmax = max(vmax, (T)SHFL_XOR(vmax, 8));
if (t == 0)
{
uint32_t mode1_w, mode2_w;
T vrange_mode1, vrange_mode2;
s->u.intrle.scratch.u64[0] = (uint64_t)vmin;
if (sizeof(T) > 4)
{
vrange_mode1 = (is_signed) ? max(zigzag64(vmin), zigzag64(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7);
mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7);
}
else
{
vrange_mode1 = (is_signed) ? max(zigzag32(vmin), zigzag32(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3);
mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3);
}
// Decide between mode1 & mode2 (also mode3 for length=2 repeat)
if (vrange_mode2 == 0 && mode1_w > 1)
{
// Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >= 3)
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
bytecnt += StoreVarint(dst + 2, vrange_mode1);
dst[bytecnt++] = 0; // Zero delta
s->u.intrle.literal_mode = 3;
s->u.intrle.literal_w = bytecnt;
}
else
{
uint32_t range, w;
if (mode1_w > mode2_w && (literal_run - 1) * (mode1_w - mode2_w) > 4)
{
s->u.intrle.literal_mode = 2;
w = mode2_w;
range = (uint32_t)vrange_mode2;
}
else
{
s->u.intrle.literal_mode = 1;
w = mode1_w;
range = (uint32_t)vrange_mode1;
}
if (w == 1)
w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 2 : 1;
else
w <<= 3; // bytes -> bits
s->u.intrle.literal_w = w;
}
}
}
__syncthreads();
vmin = (T)s->u.intrle.scratch.u64[0];
literal_mode = s->u.intrle.literal_mode;
literal_w = s->u.intrle.literal_w;
if (literal_mode == 1)
{
// Direct mode
if (!t)
{
dst[0] = 0x40 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
}
dst += 2;
if (t < literal_run && is_signed)
{
if (sizeof(T) > 4)
v0 = zigzag64(v0);
else
v0 = zigzag32(v0);
}
if (literal_w < 8)
StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t);
else if (t < literal_run)
StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3));
}
else if (literal_mode == 2)
{
// Patched base mode
if (!t)
{
uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1;
vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin;
bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7)) : (4 - min(CountLeadingBytes32(vmax << bv_scale), 3));
#if ZERO_PLL_WAR
// Insert a dummy zero patch
pll = 1;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0;
#else
pll = 0;
#endif
dst[0] = 0x80 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw];
dst[3] = ((pgw - 1) << 5) | pll;
if (is_signed)
{
vmax >>= 1;
vmax |= vmin & ((T)1 << (bw*8-1));
}
StoreBytesBigEndian(dst + 4, vmax, bw);
s->u.intrle.hdr_bytes = 4 + bw;
s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3;
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
v0 -= (t < literal_run) ? vmin : 0;
if (literal_w < 8)
StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t);
else if (t < literal_run)
StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3));
dst += s->u.intrle.pl_bytes;
}
else
{
// Delta mode
dst += literal_w;
literal_w = 0;
}
dst += (literal_run * literal_w + 7) >> 3;
numvals -= literal_run;
inpos += literal_run;
out_cnt += literal_run;
__syncthreads();
}
delta_run = s->u.intrle.delta_run;
if (delta_run > 0)
{
if (t == literal_run)
{
int64_t delta = (int64_t)v1 - (int64_t)v0;
uint64_t delta_base = (is_signed) ? (sizeof(T) > 4) ? zigzag64(v0) : zigzag32(v0) : v0;
if (delta == 0 && delta_run >= 3 && delta_run <= 10)
{
// Short repeat
uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7);
dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3);
for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++)
{
b -= 8;
dst[1 + i] = static_cast<uint8_t>(delta_base >> b);
}
s->u.intrle.hdr_bytes = 1 + delta_bw;
}
else
{
// Delta
uint64_t delta_u = zigzag64(delta);
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((delta_run - 1) >> 8);
dst[1] = (delta_run - 1) & 0xff;
bytecnt += StoreVarint(dst + bytecnt, delta_base);
bytecnt += StoreVarint(dst + bytecnt, delta_u);
s->u.intrle.hdr_bytes = bytecnt;
}
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
numvals -= delta_run;
inpos += delta_run;
out_cnt += delta_run;
}
}
if (!t)
{
s->strm_pos[cid] = static_cast<uint32_t>(dst - s->chunk.streams[cid]);
}
__syncthreads();
return out_cnt;
}
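// Header sketch for direct mode (mode 1), with illustrative values: a literal
// run of 100 values at 16 bits/value has width code 15, so the two header
// bytes are 0x40 + 15*2 + ((100-1) >> 8) = 0x5E and (100-1) & 0xff = 0x63.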
/**
* @brief Store a group of strings as a single concatenated string
*
* @param[in] dst destination buffer
* @param[in] strenc string encoder state
* @param[in] len string length (per thread)
* @param[in] t thread id
*
**/
static __device__ void StoreStringData(uint8_t *dst, strdata_enc_state_s *strenc, uint32_t len, int t)
{
// Start with summing up all the lengths
uint32_t pos = len;
uint32_t wt = t & 0x1f;
for (uint32_t n = 1; n<32; n <<= 1)
{
uint32_t tmp = SHFL(pos, (wt & ~n) | (n - 1));
pos += (wt & n) ? tmp : 0;
}
if (wt == 0x1f)
{
strenc->lengths_red[t >> 5] = pos;
}
dst += pos - len;
__syncthreads();
if (t < 32)
{
uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0;
uint32_t wpos = wlen;
for (uint32_t n = 1; n<16; n <<= 1)
{
uint32_t tmp = SHFL(wpos, (wt & ~n) | (n - 1));
wpos += (wt & n) ? tmp : 0;
}
if (wt < 16)
{
strenc->lengths_red[wt] = wpos - wlen;
}
if (wt == 0xf)
{
strenc->char_count = wpos; // Update stream position
}
}
__syncthreads();
// TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive characters at a time
// rather than have each thread do a memcpy
if (len > 0)
{
memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len);
}
}
/**
* @brief In-place conversion from lengths to positions
*
* @param[in] vals input values
* @param[in] numvals number of values
* @param[in] t thread id
*
**/
template<class T>
inline __device__ void lengths_to_positions(volatile T *vals, uint32_t numvals, unsigned int t)
{
for (uint32_t n = 1; n<numvals; n <<= 1)
{
__syncthreads();
if ((t & n) && (t < numvals))
vals[t] += vals[(t & ~n) | (n - 1)];
}
}
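// This is a Hillis-Steele inclusive prefix sum, e.g. lengths [2,1,3,4] become
// end positions [2,3,6,10] in place (values illustrative).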
/**
* @brief Timestamp scale table (powers of 10)
**/
static const __device__ __constant__ int32_t kTimeScale[10] =
{
1000000000, 100000000, 10000000, 1000000, 100000, 10000, 1000, 100, 10, 1
};
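// kTimeScale[i] = 10^(9 - i); used below to split a fixed-point timestamp of
// the given scale into whole seconds and a sub-second remainder.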
/**
* @brief Encode column data
*
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
* @param[in] num_rowgroups Number of row groups
*
**/
// blockDim {512,1,1}
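// One block per chunk: blockIdx.x indexes the column, blockIdx.y the row group.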
extern "C" __global__ void __launch_bounds__(512)
gpuEncodeOrcColumnData(EncChunk *chunks, uint32_t num_columns, uint32_t num_rowgroups)
{
__shared__ __align__(16) orcenc_state_s state_g;
orcenc_state_s * const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
int t = threadIdx.x;
if (t < sizeof(EncChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[group_id * num_columns + col_id])[t];
}
if (t < CI_NUM_STREAMS)
{
s->strm_pos[t] = 0;
}
__syncthreads();
if (!t)
{
s->cur_row = 0;
s->present_rows = 0;
s->present_out = 0;
s->numvals = 0;
s->numlengths = 0;
s->nnz = 0;
// Dictionary data is encoded in a separate kernel
if (s->chunk.encoding_kind == DICTIONARY_V2)
{
s->strm_pos[CI_DATA2] = s->chunk.strm_len[CI_DATA2];
s->strm_pos[CI_DICTIONARY] = s->chunk.strm_len[CI_DICTIONARY];
}
}
__syncthreads();
while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0)
{
// Encode valid map
if (s->present_rows < s->chunk.num_rows)
{
uint32_t present_rows = s->present_rows;
uint32_t nrows = min(s->chunk.num_rows - present_rows, 512 * 8 - (present_rows - (min(s->cur_row, s->present_out) & ~7)));
uint32_t nrows_out;
if (t*8 < nrows)
{
uint32_t row = s->chunk.start_row + present_rows + t * 8;
uint8_t valid = 0;
if (row < s->chunk.valid_rows)
{
const uint8_t *valid_map_base = reinterpret_cast<const uint8_t *>(s->chunk.valid_map_base);
valid = (valid_map_base) ? valid_map_base[row >> 3] : 0xff;
if (row + 7 > s->chunk.valid_rows)
{
valid = valid & ((1 << (s->chunk.valid_rows & 7)) - 1);
}
}
s->valid_buf[(row >> 3) & 0x1ff] = valid;
}
__syncthreads();
present_rows += nrows;
if (!t)
{
s->present_rows = present_rows;
}
// RLE encode the present stream
nrows_out = present_rows - s->present_out; // Should always be a multiple of 8 except at the end of the last row group
if (nrows_out > ((present_rows < s->chunk.num_rows) ? 130 * 8 : 0))
{
uint32_t present_out = s->present_out;
if (s->chunk.strm_id[CI_PRESENT] >= 0)
{
uint32_t flush = (present_rows < s->chunk.num_rows) ? 0 : 7;
nrows_out = (nrows_out + flush) >> 3;
nrows_out = ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, (s->chunk.start_row + present_out) >> 3, nrows_out, flush, t) * 8;
}
__syncthreads();
if (!t)
{
s->present_out = min(present_out + nrows_out, present_rows);
}
}
__syncthreads();
}
// Fetch non-null values
if (!s->chunk.streams[CI_DATA])
{
// Pass-through
__syncthreads();
if (!t)
{
s->cur_row = s->present_rows;
s->strm_pos[CI_DATA] = s->cur_row * s->chunk.dtype_len;
}
__syncthreads();
}
else if (s->cur_row < s->present_rows)
{
uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024;
uint32_t nrows = min(min(s->present_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), 512);
uint32_t row = s->chunk.start_row + s->cur_row + t;
uint32_t valid = (t < nrows) ? (s->valid_buf[(row >> 3) & 0x1ff] >> (row & 7)) & 1 : 0;
s->buf.u32[t] = valid;
// TODO: Could use a faster reduction relying on __popc() for the initial phase
lengths_to_positions(s->buf.u32, 512, t);
__syncthreads();
if (valid)
{
int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1);
const uint8_t *base = reinterpret_cast<const uint8_t *>(s->chunk.column_data_base);
switch (s->chunk.type_kind)
{
case INT:
case DATE:
case FLOAT:
s->vals.u32[nz_idx] = reinterpret_cast<const uint32_t *>(base)[row];
break;
case DOUBLE:
case LONG:
s->vals.u64[nz_idx] = reinterpret_cast<const uint64_t *>(base)[row];
break;
case SHORT:
s->vals.u32[nz_idx] = reinterpret_cast<const uint16_t *>(base)[row];
break;
case BOOLEAN:
case BYTE:
s->vals.u8[nz_idx] = reinterpret_cast<const uint8_t *>(base)[row];
break;
case TIMESTAMP: {
int64_t ts = reinterpret_cast<const int64_t *>(base)[row];
int32_t ts_scale = kTimeScale[min(s->chunk.scale, 9)];
int64_t seconds = ts / ts_scale;
int32_t nanos = (ts - seconds * ts_scale);
if (nanos < 0)
{
seconds += 1;
nanos += ts_scale;
}
s->vals.i64[nz_idx] = seconds - kORCTimeToUTC;
if (nanos != 0)
{
// Trailing zeroes are encoded in the lower 3-bits
uint32_t zeroes = 0;
nanos *= kTimeScale[9 - min(s->chunk.scale, 9)];
if (!(nanos % 100))
{
nanos /= 100;
zeroes = 1;
while (zeroes < 7 && !(nanos % 10))
{
nanos /= 10;
zeroes++;
}
}
nanos = (nanos << 3) + zeroes;
}
s->lengths.u32[nz_idx] = nanos;
break;
}
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2)
{
uint32_t dict_idx = reinterpret_cast<const uint32_t *>(base)[row];
if (dict_idx > 0x7fffffffu)
dict_idx = reinterpret_cast<const uint32_t *>(base)[dict_idx & 0x7fffffffu];
s->vals.u32[nz_idx] = dict_idx;
}
else
{
const nvstrdesc_s *str_desc = reinterpret_cast<const nvstrdesc_s *>(base) + row;
const char *ptr = str_desc->ptr;
uint32_t count = static_cast<uint32_t>(str_desc->count);
s->u.strenc.str_data[s->buf.u32[t] - 1] = ptr;
s->lengths.u32[nz_idx] = count;
}
break;
default:
break;
}
}
__syncthreads();
if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)
{
// Store string data
uint32_t nz = s->buf.u32[511];
uint32_t nz_idx = (s->nnz + t) & 0x3ff;
uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0;
StoreStringData(s->chunk.streams[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t);
if (!t)
{
s->strm_pos[CI_DATA] += s->u.strenc.char_count;
}
__syncthreads();
}
else if (s->chunk.type_kind == BOOLEAN)
{
// bool8 -> 8x bool1
uint32_t nz = s->buf.u32[511];
uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3;
if (t < n)
{
uint32_t idx8 = (s->nnz & ~7) + (t << 3);
s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7)
| ((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6)
| ((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5)
| ((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4)
| ((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3)
| ((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2)
| ((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1)
| ((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0);
}
__syncthreads();
}
if (!t)
{
uint32_t nz = s->buf.u32[511];
s->nnz += nz;
s->numvals += nz;
s->numlengths += (s->chunk.type_kind == TIMESTAMP || (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)) ? nz : 0;
s->cur_row += nrows;
}
__syncthreads();
// Encode values
if (s->numvals > 0)
{
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 7 : 0, n;
switch (s->chunk.type_kind)
{
case SHORT:
case INT:
case DATE:
n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff>(s, s->vals.i32, s->nnz - s->numvals, s->numvals, flush, t);
break;
case LONG:
case TIMESTAMP:
n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff>(s, s->vals.i64, s->nnz - s->numvals, s->numvals, flush, t);
break;
case BYTE:
n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t);
break;
case BOOLEAN:
n = ByteRLE<CI_DATA, 0x1ff>(s, s->lengths.u8, (s->nnz - s->numvals + flush) >> 3, (s->numvals + flush) >> 3, flush, t) * 8;
break;
case FLOAT:
StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t);
n = s->numvals;
break;
case DOUBLE:
StoreBytes<CI_DATA, 0x1fff>(s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t);
n = s->numvals;
break;
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff>(s, s->vals.u32, s->nnz - s->numvals, s->numvals, flush, t);
} else {
n = s->numvals;
}
break;
default:
n = s->numvals;
break;
}
__syncthreads();
if (!t)
{
s->numvals -= min(n, s->numvals);
}
}
// Encode secondary stream values
if (s->numlengths > 0)
{
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 1 : 0, n;
switch (s->chunk.type_kind)
{
case TIMESTAMP:
case STRING:
n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff>(s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, flush, t);
break;
default:
n = s->numlengths;
break;
}
__syncthreads();
if (!t)
{
s->numlengths -= min(n, s->numlengths);
}
}
}
__syncthreads();
}
__syncthreads();
if (t <= CI_PRESENT && s->chunk.strm_id[t] >= 0)
{
// Update actual encoded stream length
chunks[group_id * num_columns + col_id].strm_len[t] = s->strm_pos[t];
if (!s->chunk.streams[t])
{
chunks[group_id * num_columns + col_id].streams[t] = reinterpret_cast<uint8_t *>(const_cast<void *>(s->chunk.column_data_base)) + s->chunk.start_row * s->chunk.dtype_len;
}
}
}
/**
* @brief Encode column dictionaries
*
* @param[in] stripes Stripe dictionaries device array [stripe][string_column]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*
**/
// blockDim {512,1,1}
extern "C" __global__ void __launch_bounds__(512)
gpuEncodeStringDictionaries(StripeDictionary *stripes, EncChunk *chunks, uint32_t num_columns)
{
__shared__ __align__(16) orcenc_state_s state_g;
orcenc_state_s * const s = &state_g;
uint32_t stripe_id = blockIdx.x;
uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2;
uint32_t chunk_id;
int t = threadIdx.x;
const nvstrdesc_s *str_desc;
const uint32_t *dict_data;
if (t < sizeof(StripeDictionary) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->u.dict_stripe)[t] = ((const uint32_t *)&stripes[stripe_id])[t];
}
__syncthreads();
chunk_id = s->u.dict_stripe.start_chunk * num_columns + s->u.dict_stripe.column_id;
if (t < sizeof(EncChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[chunk_id])[t];
}
if (t == 0)
{
s->strm_pos[cid] = 0;
s->numlengths = 0;
s->nrows = s->u.dict_stripe.num_strings;
s->cur_row = 0;
}
str_desc = reinterpret_cast<const nvstrdesc_s *>(s->u.dict_stripe.column_data_base);
dict_data = s->u.dict_stripe.dict_data;
__syncthreads();
if (s->chunk.encoding_kind != DICTIONARY_V2)
{
return; // This column isn't using dictionary encoding -> bail out
}
while (s->cur_row < s->nrows || s->numlengths != 0)
{
uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512));
uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0;
if (cid == CI_DICTIONARY)
{
// Encoding string contents
const char *ptr = (t < numvals) ? str_desc[string_idx].ptr : 0;
uint32_t count = (t < numvals) ? static_cast<uint32_t>(str_desc[string_idx].count) : 0;
s->u.strenc.str_data[t] = ptr;
StoreStringData(s->chunk.streams[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY], &s->u.strenc, (ptr) ? count : 0, t);
if (!t)
{
s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count;
}
}
else
{
// Encoding string lengths
uint32_t count = (t < numvals) ? static_cast<uint32_t>(str_desc[string_idx].count) : 0;
uint32_t nz_idx = (s->cur_row + t) & 0x3ff;
if (t < numvals)
s->lengths.u32[nz_idx] = count;
__syncthreads();
if (s->numlengths + numvals > 0)
{
uint32_t flush = (s->cur_row + numvals == s->nrows) ? 1 : 0;
uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff>(s, s->lengths.u32, s->cur_row, s->numlengths + numvals, flush, t);
__syncthreads();
if (!t)
{
s->numlengths += numvals;
s->numlengths -= min(n, s->numlengths);
}
}
}
if (t == 0)
{
s->cur_row += numvals;
}
__syncthreads();
}
if (t == 0)
{
chunks[chunk_id].strm_len[cid] = s->strm_pos[cid];
}
}
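// Companion note for the kernel above: each (stripe, string column) pair is
// covered by two blocks. The launcher below uses a grid of
// {num_string_columns * num_stripes, 2}, and blockIdx.y picks the stream:
//
//   uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2;
//   // y == 0: RLE-encode the dictionary entry lengths into CI_DATA2
//   // y == 1: concatenate the dictionary entry characters into CI_DICTIONARY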
/**
* @brief Merge chunked column data into a single contiguous stream
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*
**/
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuCompactOrcDataStreams(StripeStream *strm_desc, EncChunk *chunks, uint32_t num_columns)
{
__shared__ __align__(16) StripeStream ss;
__shared__ __align__(16) EncChunk ck0;
__shared__ uint8_t * volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t strm_id = blockIdx.x;
uint32_t ck0_id, cid;
uint32_t t = threadIdx.x;
uint8_t *dst_ptr;
if (t < sizeof(StripeStream) / sizeof(uint32_t))
{
((volatile uint32_t *)&ss)[t] = ((const uint32_t *)&strm_desc[strm_id])[t];
}
__syncthreads();
ck0_id = ss.first_chunk_id;
if (t < sizeof(EncChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&ck0)[t] = ((const uint32_t *)&chunks[ck0_id])[t];
}
__syncthreads();
cid = ss.strm_type;
dst_ptr = ck0.streams[cid] + ck0.strm_len[cid];
for (uint32_t g = 1; g < ss.num_chunks; g++)
{
uint8_t *src_ptr;
uint32_t len;
if (t == 0)
{
src_ptr = chunks[ck0_id + g * num_columns].streams[cid];
len = chunks[ck0_id + g * num_columns].strm_len[cid];
if (src_ptr != dst_ptr)
{
chunks[ck0_id + g * num_columns].streams[cid] = dst_ptr;
}
ck_curptr_g = src_ptr;
ck_curlen_g = len;
}
__syncthreads();
src_ptr = ck_curptr_g;
len = ck_curlen_g;
if (len > 0 && src_ptr != dst_ptr)
{
for (uint32_t i = 0; i < len; i += 1024)
{
uint8_t v = (i + t < len) ? src_ptr[i + t] : 0;
__syncthreads();
if (i + t < len)
{
dst_ptr[i + t] = v;
}
}
}
dst_ptr += len;
__syncthreads();
}
if (!t)
{
strm_desc[strm_id].stream_size = dst_ptr - ck0.streams[cid];
}
}
/**
* @brief Initializes compression input/output structures
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[out] comp_in Per-block compression input parameters
* @param[out] comp_out Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
*
**/
// blockDim {256,1,1}
extern "C" __global__ void __launch_bounds__(256)
gpuInitCompressionBlocks(StripeStream *strm_desc, EncChunk *chunks, gpu_inflate_input_s *comp_in, gpu_inflate_status_s *comp_out, uint8_t *compressed_bfr, uint32_t comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ uint8_t * volatile uncomp_base_g;
uint32_t strm_id = blockIdx.x;
uint32_t t = threadIdx.x;
uint32_t num_blocks;
uint8_t *src, *dst;
if (t < sizeof(StripeStream) / sizeof(uint32_t))
{
((volatile uint32_t *)&ss)[t] = ((const uint32_t *)&strm_desc[strm_id])[t];
}
__syncthreads();
if (t == 0)
{
uncomp_base_g = chunks[ss.first_chunk_id].streams[ss.strm_type];
}
__syncthreads();
src = uncomp_base_g;
dst = compressed_bfr + ss.bfr_offset;
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1;
for (uint32_t b = t; b < num_blocks; b += 256)
{
gpu_inflate_input_s *blk_in = &comp_in[ss.first_block + b];
gpu_inflate_status_s *blk_out = &comp_out[ss.first_block + b];
uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
blk_in->srcDevice = src + b * comp_blk_size;
blk_in->srcSize = blk_size;
blk_in->dstDevice = dst + b * (3 + comp_blk_size) + 3;
blk_in->dstSize = blk_size + 3;
blk_out->bytes_written = blk_size;
blk_out->status = 1;
blk_out->reserved = 0;
}
}
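// Sizing note: each block's destination slot is (comp_blk_size + 3) bytes, the
// extra 3 being reserved for the block header that gpuCompactCompressedBlocks
// writes later. A hedged host-side sketch of the per-stream output space implied
// by the dstDevice stride above, mirroring this kernel's num_blocks computation:
//
//   uint32_t num_blocks = (stream_size > 0) ? (stream_size - 1) / comp_blk_size + 1 : 1;
//   size_t per_stream_bytes = (size_t)num_blocks * (comp_blk_size + 3);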
/**
* @brief Compacts compressed blocks into a single contiguous stream and updates the 3-byte block length fields
*
* @param[in,out] strm_desc StripeStream device array [stripe][stream]
* @param[in] comp_in Per-block compression input parameters
* @param[in] comp_out Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
*
**/
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuCompactCompressedBlocks(StripeStream *strm_desc, gpu_inflate_input_s *comp_in, gpu_inflate_status_s *comp_out, uint8_t *compressed_bfr, uint32_t comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ const uint8_t * volatile comp_src_g;
__shared__ uint32_t volatile comp_len_g;
uint32_t strm_id = blockIdx.x;
uint32_t t = threadIdx.x;
uint32_t num_blocks, b, blk_size;
const uint8_t *src;
uint8_t *dst;
if (t < sizeof(StripeStream) / sizeof(uint32_t))
{
((volatile uint32_t *)&ss)[t] = ((const uint32_t *)&strm_desc[strm_id])[t];
}
__syncthreads();
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 0;
dst = compressed_bfr + ss.bfr_offset;
b = 0;
do
{
if (t == 0)
{
gpu_inflate_input_s *blk_in = &comp_in[ss.first_block + b];
gpu_inflate_status_s *blk_out = &comp_out[ss.first_block + b];
uint32_t src_len = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
uint32_t dst_len = (blk_out->status == 0) ? blk_out->bytes_written : src_len;
uint32_t blk_size24;
if (dst_len >= src_len)
{
// Copy from uncompressed source
src = reinterpret_cast<const uint8_t *>(blk_in->srcDevice);
blk_out->bytes_written = src_len;
dst_len = src_len;
blk_size24 = dst_len * 2 + 1;
}
else
{
// Compressed block
src = reinterpret_cast<const uint8_t *>(blk_in->dstDevice);
blk_size24 = dst_len * 2 + 0;
}
dst[0] = static_cast<uint8_t>(blk_size24 >> 0);
dst[1] = static_cast<uint8_t>(blk_size24 >> 8);
dst[2] = static_cast<uint8_t>(blk_size24 >> 16);
comp_src_g = src;
comp_len_g = dst_len;
}
__syncthreads();
src = comp_src_g;
blk_size = comp_len_g;
dst += 3; // skip over length written by thread0
if (src != dst)
{
for (uint32_t i = 0; i < blk_size; i += 1024)
{
uint8_t v = (i + t < blk_size) ? src[i + t] : 0;
__syncthreads();
if (i + t < blk_size)
{
dst[i + t] = v;
}
}
}
dst += blk_size;
__syncthreads();
} while (++b < num_blocks);
// Update stripe stream with the compressed size
if (t == 0)
{
strm_desc[strm_id].stream_size = static_cast<uint32_t>(dst - (compressed_bfr + ss.bfr_offset));
}
}
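// Worked example of the 3-byte block header written above: the 24-bit value is
// (length << 1) | is_original, stored little-endian. A 100-byte block kept
// uncompressed gets blk_size24 = 100 * 2 + 1 = 0x0000C9, i.e. the bytes
// { 0xC9, 0x00, 0x00 }; the same block compressed to 60 bytes gets
// blk_size24 = 60 * 2 = 0x000078, i.e. { 0x78, 0x00, 0x00 }.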
/**
* @brief Launches kernel for encoding column data
*
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
* @param[in] num_rowgroups Number of row groups
* @param[in] stream HIP stream to use, default 0
*
* @return hipSuccess if successful, a HIP error code otherwise
**/
hipError_t EncodeOrcColumnData(EncChunk *chunks, uint32_t num_columns, uint32_t num_rowgroups, hipStream_t stream)
{
dim3 dim_block(512, 1); // 512 threads per chunk
dim3 dim_grid(num_columns, num_rowgroups);
hipLaunchKernelGGL(gpuEncodeOrcColumnData, dim_grid, dim_block, 0, stream, chunks, num_columns, num_rowgroups);
return hipSuccess;
}
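// A minimal host-side sketch of driving this launcher (names hypothetical; the
// caller owns allocation and initialization of the chunk array):
//
//   EncChunk *d_chunks;                      // [rowgroup][column] in device memory
//   hipMalloc(&d_chunks, num_rowgroups * num_columns * sizeof(EncChunk));
//   // ... fill in stream pointers, row counts, types, encodings per chunk ...
//   EncodeOrcColumnData(d_chunks, num_columns, num_rowgroups, stream);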
/**
* @brief Launches kernel for encoding column dictionaries
*
* @param[in] stripes Stripe dictionaries device array [stripe][string_column]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_string_columns Number of string columns
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] stream HIP stream to use, default 0
*
* @return hipSuccess if successful, a HIP error code otherwise
**/
hipError_t EncodeStripeDictionaries(StripeDictionary *stripes, EncChunk *chunks, uint32_t num_string_columns, uint32_t num_columns, uint32_t num_stripes, hipStream_t stream)
{
dim3 dim_block(512, 1); // 512 threads per dictionary
dim3 dim_grid(num_string_columns * num_stripes, 2);
hipLaunchKernelGGL(gpuEncodeStringDictionaries, dim_grid, dim_block, 0, stream, stripes, chunks, num_columns);
return hipSuccess;
}
/**
* @brief Launches kernel for compacting chunked column data prior to compression
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_stripe_streams Total number of streams
* @param[in] num_columns Number of columns
* @param[in] stream HIP stream to use, default 0
*
* @return hipSuccess if successful, a HIP error code otherwise
**/
hipError_t CompactOrcDataStreams(StripeStream *strm_desc, EncChunk *chunks, uint32_t num_stripe_streams, uint32_t num_columns, hipStream_t stream)
{
dim3 dim_block(1024, 1);
dim3 dim_grid(num_stripe_streams, 1);
hipLaunchKernelGGL(gpuCompactOrcDataStreams, dim_grid, dim_block, 0, stream, strm_desc, chunks, num_columns);
return hipSuccess;
}
/**
* @brief Launches kernel(s) for compressing data streams
*
* @param[in] compressed_data Output compressed blocks
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[out] comp_in Per-block compression input parameters
* @param[out] comp_out Per-block compression status
* @param[in] num_stripe_streams Total number of streams
* @param[in] num_compressed_blocks Total number of compressed blocks
* @param[in] compression Type of compression
* @param[in] comp_blk_size Compression block size
* @param[in] stream HIP stream to use, default 0
*
* @return hipSuccess if successful, a HIP error code otherwise
**/
hipError_t CompressOrcDataStreams(uint8_t *compressed_data, StripeStream *strm_desc, EncChunk *chunks, gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out, uint32_t num_stripe_streams, uint32_t num_compressed_blocks, CompressionKind compression, uint32_t comp_blk_size, hipStream_t stream)
{
dim3 dim_block_init(256, 1);
dim3 dim_grid(num_stripe_streams, 1);
hipLaunchKernelGGL(gpuInitCompressionBlocks, dim_grid, dim_block_init, 0, stream, strm_desc, chunks, comp_in, comp_out, compressed_data, comp_blk_size);
if (compression == SNAPPY)
{
gpu_snap(comp_in, comp_out, num_compressed_blocks, stream);
}
dim3 dim_block_compact(1024, 1);
hipLaunchKernelGGL(gpuCompactCompressedBlocks, dim_grid, dim_block_compact, 0, stream, strm_desc, comp_in, comp_out, compressed_data, comp_blk_size);
return hipSuccess;
}
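// Note: gpuInitCompressionBlocks seeds every block's status with 1 ("not
// compressed"), and only the SNAPPY path runs a compressor to clear it, so any
// other CompressionKind makes gpuCompactCompressedBlocks store each block
// verbatim behind a header with the "original" bit set. Hedged usage sketch
// (buffer names hypothetical, 64 KB block size purely illustrative):
//
//   CompressOrcDataStreams(d_comp_buf, d_strm_desc, d_chunks, d_comp_in, d_comp_out,
//                          num_stripe_streams, num_compressed_blocks, SNAPPY,
//                          64 * 1024, stream);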
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
| 77113a8c28a248ce28a1bf58cfc47158e36ff439.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "orc_common.h"
#include "orc_gpu.h"
#include <io/utilities/block_utils.cuh>
// Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2
// Workaround replaces zero-length patch lists by a dummy zero patch
#define ZERO_PLL_WAR 1
namespace cudf {
namespace io {
namespace orc {
namespace gpu {
#define SCRATCH_BFRSZ (512*4)
static __device__ __constant__ int64_t kORCTimeToUTC = 1420070400; // Seconds from January 1st, 1970 to January 1st, 2015
struct byterle_enc_state_s
{
uint32_t literal_run;
uint32_t repeat_run;
volatile uint32_t rpt_map[(512 / 32) + 1];
};
struct intrle_enc_state_s
{
uint32_t literal_run;
uint32_t delta_run;
uint32_t literal_mode;
uint32_t literal_w;
uint32_t hdr_bytes;
uint32_t pl_bytes;
volatile uint32_t delta_map[(512 / 32) + 1];
volatile union {
uint32_t u32[(512 / 32) * 2];
uint64_t u64[(512 / 32) * 2];
} scratch;
};
struct strdata_enc_state_s
{
uint32_t char_count;
uint32_t lengths_red[(512 / 32)];
const char *str_data[512];
};
struct orcenc_state_s
{
uint32_t cur_row; // Current row in group
uint32_t present_rows; // # of rows in present buffer
uint32_t present_out; // # of rows in present buffer that have been flushed
uint32_t nrows; // # of rows in current batch
uint32_t numvals; // # of non-zero values in current batch (<=nrows)
uint32_t numlengths; // # of non-zero values in DATA2 batch
uint32_t nnz; // Running count of non-null values
EncChunk chunk;
uint32_t strm_pos[CI_NUM_STREAMS];
uint8_t valid_buf[512]; // valid map bits
union {
byterle_enc_state_s byterle;
intrle_enc_state_s intrle;
strdata_enc_state_s strenc;
StripeDictionary dict_stripe;
} u;
union {
uint8_t u8[SCRATCH_BFRSZ]; // general scratch buffer
uint32_t u32[SCRATCH_BFRSZ /4];
} buf;
union {
uint8_t u8[2048];
uint32_t u32[1024];
int32_t i32[1024];
uint64_t u64[1024];
int64_t i64[1024];
} vals;
union {
uint8_t u8[2048];
uint32_t u32[1024];
} lengths;
};
static inline __device__ uint32_t zigzag32(int32_t v) { int32_t s = (v >> 31); return ((v ^ s) * 2) - s; }
static inline __device__ uint64_t zigzag64(int64_t v) { int64_t s = (v < 0) ? 1 : 0; return ((v ^ -s) * 2) + s; }
static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; }
static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; }
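// Worked example of the zigzag mapping above, which folds signed values into
// small unsigned codes so that narrow RLE/varint widths can be used:
//   zigzag32(0) == 0, zigzag32(-1) == 1, zigzag32(1) == 2,
//   zigzag32(-2) == 3, zigzag32(2) == 4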
/**
* @brief Raw data output
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at streams[cid]+strm_pos[cid])
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] count number of bytes to encode
* @param[in] t thread id
*
**/
template<StreamIndexType cid, uint32_t inmask>
static __device__ void StoreBytes(orcenc_state_s *s, const uint8_t *inbuf, uint32_t inpos, uint32_t count, int t)
{
uint8_t *dst = s->chunk.streams[cid] + s->strm_pos[cid];
while (count > 0)
{
uint32_t n = min(count, 512);
if (t < n)
{
dst[t] = inbuf[(inpos + t) & inmask];
}
dst += n;
inpos += n;
count -= n;
}
__syncthreads();
if (!t)
{
s->strm_pos[cid] = static_cast<uint32_t>(dst - s->chunk.streams[cid]);
}
}
/**
* @brief ByteRLE encoder
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at streams[cid]+strm_pos[cid])
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
*
* @return number of input values encoded
*
**/
template<StreamIndexType cid, uint32_t inmask>
static __device__ uint32_t ByteRLE(orcenc_state_s *s, const uint8_t *inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t)
{
uint8_t *dst = s->chunk.streams[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
while (numvals > 0)
{
uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
uint32_t rpt_map = BALLOT(t + 1 < numvals && v0 == v1), literal_run, repeat_run, maxvals = min(numvals, 512);
if (!(t & 0x1f))
s->u.byterle.rpt_map[t >> 5] = rpt_map;
__syncthreads();
if (t == 0)
{
// Find the start of a run of at least 3 identical bytes
// TBD: The two loops below could be eliminated using more ballot+ffs using warp0
literal_run = 0;
repeat_run = 0;
while (literal_run < maxvals)
{
uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1];
uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1);
if (mask)
{
uint32_t literal_run_ofs = __ffs(mask) - 1;
literal_run += literal_run_ofs;
repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1));
if (repeat_run + literal_run_ofs == 32)
{
while (next == ~0)
{
uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1;
next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0;
repeat_run += 32;
}
repeat_run += __ffs(~next) - 1;
}
repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals));
if (repeat_run < 3)
{
literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0;
repeat_run = 0;
}
break;
}
rpt_map = next;
literal_run += 32;
}
if (repeat_run >= 130)
{
// Limit large runs to multiples of 130
repeat_run = (repeat_run >= 3*130) ? 3*130 : (repeat_run >= 2*130) ? 2*130 : 130;
}
else if (literal_run && literal_run + repeat_run == maxvals)
{
repeat_run = 0; // Try again at next iteration
}
s->u.byterle.repeat_run = repeat_run;
s->u.byterle.literal_run = min(literal_run, maxvals);
}
__syncthreads();
literal_run = s->u.byterle.literal_run;
if (!flush && literal_run == numvals)
{
literal_run &= ~0x7f;
if (!literal_run)
break;
}
if (literal_run > 0)
{
uint32_t num_runs = (literal_run + 0x7f) >> 7;
if (t < literal_run)
{
uint32_t run_id = t >> 7;
uint32_t run = min(literal_run - run_id * 128, 128);
if (!(t & 0x7f))
dst[run_id + t] = 0x100 - run;
dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += num_runs + literal_run;
out_cnt += literal_run;
numvals -= literal_run;
inpos += literal_run;
}
repeat_run = s->u.byterle.repeat_run;
if (repeat_run > 0)
{
while (repeat_run >= 130)
{
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = 0x7f;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += 130;
numvals -= 130;
inpos += 130;
repeat_run -= 130;
}
if (!flush && repeat_run == numvals)
{
// Wait for more data in case we can continue the run later
break;
}
if (repeat_run >= 3)
{
if (t == literal_run) // repeat_run follows literal_run
{
dst[0] = repeat_run - 3;
dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0;
}
dst += 2;
out_cnt += repeat_run;
numvals -= repeat_run;
inpos += repeat_run;
}
}
}
if (!t)
{
s->strm_pos[cid] = static_cast<uint32_t>(dst - s->chunk.streams[cid]);
}
__syncthreads();
return out_cnt;
}
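// Worked example of the byte-RLE output produced above: a run of 10 copies of
// 0x5A is emitted as the header (10 - 3) followed by the value, i.e.
// { 0x07, 0x5A }; a literal run of the two bytes { 0x11, 0x22 } gets the
// negative-count header 0x100 - 2, i.e. { 0xFE, 0x11, 0x22 }.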
/**
* @brief Maps the symbol size in bytes to RLEv2 5-bit length code
**/
static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] =
{
0, 7, 15, 23, 27, 28, 29, 30, 31
};
/**
* @brief Encode a varint value, return the number of bytes written
**/
static inline __device__ uint32_t StoreVarint(uint8_t *dst, uint64_t v)
{
uint32_t bytecnt = 0;
for (;;)
{
uint32_t c = (uint32_t)(v & 0x7f);
v >>= 7u;
if (v == 0)
{
dst[bytecnt++] = c;
break;
}
else
{
dst[bytecnt++] = c + 0x80;
}
}
return bytecnt;
}
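// Worked example: StoreVarint emits LEB128, i.e. 7 payload bits per byte with
// the least significant group first and 0x80 set on every byte except the
// last. v = 300 (binary 1_0010_1100) becomes { 0xAC, 0x02 }; values below 128
// fit in a single byte.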
static inline __device__ void intrle_minmax(int64_t &vmin, int64_t &vmax) { vmin = INT64_MIN; vmax = INT64_MAX; }
//static inline __device__ void intrle_minmax(uint64_t &vmin, uint64_t &vmax) { vmin = UINT64_C(0); vmax = UINT64_MAX; }
static inline __device__ void intrle_minmax(int32_t &vmin, int32_t &vmax) { vmin = INT32_MIN; vmax = INT32_MAX; }
static inline __device__ void intrle_minmax(uint32_t &vmin, uint32_t &vmax) { vmin = UINT32_C(0); vmax = UINT32_MAX; }
template<class T>
static inline __device__ void StoreBytesBigEndian(uint8_t *dst, T v, uint32_t w)
{
for (uint32_t i = 0, b = w * 8; i < w; ++i)
{
b -= 8;
dst[i] = static_cast<uint8_t>(v >> b);
}
}
// Combine and store bits for symbol widths less than 8
static inline __device__ void StoreBitsBigEndian(uint8_t *dst, uint32_t v, uint32_t w, int num_vals, int t)
{
if (t <= (num_vals | 0x1f))
{
uint32_t mask;
if (w <= 1)
{
v = (v << 1) | (SHFL_XOR(v, 1) & 0x1);
v = (v << 2) | (SHFL_XOR(v, 2) & 0x3);
v = (v << 4) | (SHFL_XOR(v, 4) & 0xf);
mask = 0x7;
}
else if (w <= 2)
{
v = (v << 2) | (SHFL_XOR(v, 1) & 0x3);
v = (v << 4) | (SHFL_XOR(v, 2) & 0xf);
mask = 0x3;
}
else // if (w <= 4)
{
v = (v << 4) | (SHFL_XOR(v, 1) & 0xf);
mask = 0x1;
}
if (t < num_vals && !(t & mask))
{
dst[(t * w) >> 3] = static_cast<uint8_t>(v);
}
}
}
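// Worked example for w == 2: the four lane values {1, 2, 3, 0} are funneled
// into lane 0 as the single byte 0b01101100 (0x6C); the first value lands in
// the most significant bits, matching ORC's big-endian bit packing.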
/**
* @brief Integer RLEv2 encoder
*
* @param[in] cid stream type (strm_pos[cid] will be updated and output stored at streams[cid]+strm_pos[cid])
* @param[in] s encoder state
* @param[in] inbuf base input buffer
* @param[in] inpos position in input buffer
* @param[in] inmask input buffer position mask for circular buffers
* @param[in] numvals max number of values to encode
* @param[in] flush encode all remaining values if nonzero
* @param[in] t thread id
*
* @return number of input values encoded
*
**/
template<StreamIndexType cid, class T, bool is_signed, uint32_t inmask>
static __device__ uint32_t IntegerRLE(orcenc_state_s *s, const T *inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t)
{
uint8_t *dst = s->chunk.streams[cid] + s->strm_pos[cid];
uint32_t out_cnt = 0;
while (numvals > 0)
{
T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0;
T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0;
T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0;
uint32_t delta_map = BALLOT(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512), literal_run, delta_run;
if (!(t & 0x1f))
s->u.intrle.delta_map[t >> 5] = delta_map;
__syncthreads();
if (!t)
{
// Find the start of the next delta run (2 consecutive values with the same delta)
literal_run = delta_run = 0;
while (literal_run < maxvals)
{
if (delta_map != 0)
{
uint32_t literal_run_ofs = __ffs(delta_map) - 1;
literal_run += literal_run_ofs;
delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1));
if (literal_run_ofs + delta_run == 32)
{
for (;;)
{
uint32_t delta_idx = (literal_run + delta_run) >> 5;
delta_map = (delta_idx < 512/32) ? s->u.intrle.delta_map[delta_idx] : 0;
if (delta_map != ~0)
break;
delta_run += 32;
}
delta_run += __ffs(~delta_map) - 1;
}
delta_run += 2;
break;
}
literal_run += 32;
delta_map = s->u.intrle.delta_map[(literal_run >> 5)];
}
literal_run = min(literal_run, maxvals);
s->u.intrle.literal_run = literal_run;
s->u.intrle.delta_run = min(delta_run, maxvals - literal_run);
}
__syncthreads();
literal_run = s->u.intrle.literal_run;
// Find minimum and maximum values
if (literal_run > 0)
{
// Find min & max
T vmin, vmax;
uint32_t literal_mode, literal_w;
if (t < literal_run)
{
vmin = vmax = v0;
}
else
{
intrle_minmax(vmax, vmin);
}
vmin = min(vmin, (T)SHFL_XOR(vmin, 1));
vmin = min(vmin, (T)SHFL_XOR(vmin, 2));
vmin = min(vmin, (T)SHFL_XOR(vmin, 4));
vmin = min(vmin, (T)SHFL_XOR(vmin, 8));
vmin = min(vmin, (T)SHFL_XOR(vmin, 16));
vmax = max(vmax, (T)SHFL_XOR(vmax, 1));
vmax = max(vmax, (T)SHFL_XOR(vmax, 2));
vmax = max(vmax, (T)SHFL_XOR(vmax, 4));
vmax = max(vmax, (T)SHFL_XOR(vmax, 8));
vmax = max(vmax, (T)SHFL_XOR(vmax, 16));
if (!(t & 0x1f))
{
s->u.intrle.scratch.u64[(t >> 5) * 2 + 0] = vmin;
s->u.intrle.scratch.u64[(t >> 5) * 2 + 1] = vmax;
}
__syncthreads();
if (t < 32)
{
vmin = (T)s->u.intrle.scratch.u64[(t & 0xf) * 2 + 0];
vmax = (T)s->u.intrle.scratch.u64[(t & 0xf) * 2 + 1];
vmin = min(vmin, (T)SHFL_XOR(vmin, 1));
vmin = min(vmin, (T)SHFL_XOR(vmin, 2));
vmin = min(vmin, (T)SHFL_XOR(vmin, 4));
vmin = min(vmin, (T)SHFL_XOR(vmin, 8));
vmax = max(vmax, (T)SHFL_XOR(vmax, 1));
vmax = max(vmax, (T)SHFL_XOR(vmax, 2));
vmax = max(vmax, (T)SHFL_XOR(vmax, 4));
vmax = max(vmax, (T)SHFL_XOR(vmax, 8));
if (t == 0)
{
uint32_t mode1_w, mode2_w;
T vrange_mode1, vrange_mode2;
s->u.intrle.scratch.u64[0] = (uint64_t)vmin;
if (sizeof(T) > 4)
{
vrange_mode1 = (is_signed) ? max(zigzag64(vmin), zigzag64(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7);
mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7);
}
else
{
vrange_mode1 = (is_signed) ? max(zigzag32(vmin), zigzag32(vmax)) : vmax;
vrange_mode2 = vmax - vmin;
mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3);
mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3);
}
// Decide between mode1 & mode2 (also mode3 for length=2 repeat)
if (vrange_mode2 == 0 && mode1_w > 1)
{
// Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >= 3)
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
bytecnt += StoreVarint(dst + 2, vrange_mode1);
dst[bytecnt++] = 0; // Zero delta
s->u.intrle.literal_mode = 3;
s->u.intrle.literal_w = bytecnt;
}
else
{
uint32_t range, w;
if (mode1_w > mode2_w && (literal_run - 1) * (mode1_w - mode2_w) > 4)
{
s->u.intrle.literal_mode = 2;
w = mode2_w;
range = (uint32_t)vrange_mode2;
}
else
{
s->u.intrle.literal_mode = 1;
w = mode1_w;
range = (uint32_t)vrange_mode1;
}
if (w == 1)
w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 2 : 1;
else
w <<= 3; // bytes -> bits
s->u.intrle.literal_w = w;
}
}
}
__syncthreads();
vmin = (T)s->u.intrle.scratch.u64[0];
literal_mode = s->u.intrle.literal_mode;
literal_w = s->u.intrle.literal_w;
if (literal_mode == 1)
{
// Direct mode
if (!t)
{
dst[0] = 0x40 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
}
dst += 2;
if (t < literal_run && is_signed)
{
if (sizeof(T) > 4)
v0 = zigzag64(v0);
else
v0 = zigzag32(v0);
}
if (literal_w < 8)
StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t);
else if (t < literal_run)
StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3));
}
else if (literal_mode == 2)
{
// Patched base mode
if (!t)
{
uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1;
vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin;
bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7)) : (4 - min(CountLeadingBytes32(vmax << bv_scale), 3));
#if ZERO_PLL_WAR
// Insert a dummy zero patch
pll = 1;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0;
dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0;
#else
pll = 0;
#endif
dst[0] = 0x80 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8);
dst[1] = (literal_run - 1) & 0xff;
dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw];
dst[3] = ((pgw - 1) << 5) | pll;
if (is_signed)
{
vmax >>= 1;
vmax |= vmin & ((T)1 << (bw*8-1));
}
StoreBytesBigEndian(dst + 4, vmax, bw);
s->u.intrle.hdr_bytes = 4 + bw;
s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3;
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
v0 -= (t < literal_run) ? vmin : 0;
if (literal_w < 8)
StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t);
else if (t < literal_run)
StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3));
dst += s->u.intrle.pl_bytes;
}
else
{
// Delta mode
dst += literal_w;
literal_w = 0;
}
dst += (literal_run * literal_w + 7) >> 3;
numvals -= literal_run;
inpos += literal_run;
out_cnt += literal_run;
__syncthreads();
}
delta_run = s->u.intrle.delta_run;
if (delta_run > 0)
{
if (t == literal_run)
{
int64_t delta = (int64_t)v1 - (int64_t)v0;
uint64_t delta_base = (is_signed) ? (sizeof(T) > 4) ? zigzag64(v0) : zigzag32(v0) : v0;
if (delta == 0 && delta_run >= 3 && delta_run <= 10)
{
// Short repeat
uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7);
dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3);
for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++)
{
b -= 8;
dst[1 + i] = static_cast<uint8_t>(delta_base >> b);
}
s->u.intrle.hdr_bytes = 1 + delta_bw;
}
else
{
// Delta
uint64_t delta_u = zigzag64(delta);
uint32_t bytecnt = 2;
dst[0] = 0xC0 + ((delta_run - 1) >> 8);
dst[1] = (delta_run - 1) & 0xff;
bytecnt += StoreVarint(dst + bytecnt, delta_base);
bytecnt += StoreVarint(dst + bytecnt, delta_u);
s->u.intrle.hdr_bytes = bytecnt;
}
}
__syncthreads();
dst += s->u.intrle.hdr_bytes;
numvals -= delta_run;
inpos += delta_run;
out_cnt += delta_run;
}
}
if (!t)
{
s->strm_pos[cid] = static_cast<uint32_t>(dst - s->chunk.streams[cid]);
}
__syncthreads();
return out_cnt;
}
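// Worked example of the RLEv2 "short repeat" case above (delta == 0 and
// 3 <= run <= 10): a run of 4 copies of the unsigned value 7 needs one payload
// byte, so the header is ((1 - 1) << 3) + (4 - 3) = 0x01 (top two bits 00 mark
// short repeat) followed by the value: { 0x01, 0x07 }.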
/**
* @brief Store a group of strings as a single concatenated string
*
* @param[in] dst destination buffer
* @param[in] strenc string encoder state
* @param[in] len string length for the current thread
* @param[in] t thread id
*
**/
static __device__ void StoreStringData(uint8_t *dst, strdata_enc_state_s *strenc, uint32_t len, int t)
{
// Start with summing up all the lengths
uint32_t pos = len;
uint32_t wt = t & 0x1f;
for (uint32_t n = 1; n<32; n <<= 1)
{
uint32_t tmp = SHFL(pos, (wt & ~n) | (n - 1));
pos += (wt & n) ? tmp : 0;
}
if (wt == 0x1f)
{
strenc->lengths_red[t >> 5] = pos;
}
dst += pos - len;
__syncthreads();
if (t < 32)
{
uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0;
uint32_t wpos = wlen;
for (uint32_t n = 1; n<16; n <<= 1)
{
uint32_t tmp = SHFL(wpos, (wt & ~n) | (n - 1));
wpos += (wt & n) ? tmp : 0;
}
if (wt < 16)
{
strenc->lengths_red[wt] = wpos - wlen;
}
if (wt == 0xf)
{
strenc->char_count = wpos; // Update stream position
}
}
__syncthreads();
// TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive characters at a time
// rather than have each thread do a memcpy
if (len > 0)
{
memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len);
}
}
/**
* @brief In-place conversion from lengths to positions
*
* @param[in] vals input values
* @param[in] numvals number of values
* @param[in] t thread id
*
**/
template<class T>
inline __device__ void lengths_to_positions(volatile T *vals, uint32_t numvals, unsigned int t)
{
for (uint32_t n = 1; n<numvals; n <<= 1)
{
__syncthreads();
if ((t & n) && (t < numvals))
vals[t] += vals[(t & ~n) | (n - 1)];
}
}
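// Worked example: this is an in-place inclusive prefix sum (Hillis-Steele
// style), so the lengths {1, 0, 2, 1} become the end positions {1, 1, 3, 4};
// callers recover each element's start offset as vals[t] - length.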
/**
* @brief Timestamp scale table (powers of 10)
**/
static const __device__ __constant__ int32_t kTimeScale[10] =
{
1000000000, 100000000, 10000000, 1000000, 100000, 10000, 1000, 100, 10, 1
};
/**
* @brief Encode column data
*
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
* @param[in] num_rowgroups Number of row groups
*
**/
// blockDim {512,1,1}
extern "C" __global__ void __launch_bounds__(512)
gpuEncodeOrcColumnData(EncChunk *chunks, uint32_t num_columns, uint32_t num_rowgroups)
{
__shared__ __align__(16) orcenc_state_s state_g;
orcenc_state_s * const s = &state_g;
uint32_t col_id = blockIdx.x;
uint32_t group_id = blockIdx.y;
int t = threadIdx.x;
if (t < sizeof(EncChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[group_id * num_columns + col_id])[t];
}
if (t < CI_NUM_STREAMS)
{
s->strm_pos[t] = 0;
}
__syncthreads();
if (!t)
{
s->cur_row = 0;
s->present_rows = 0;
s->present_out = 0;
s->numvals = 0;
s->numlengths = 0;
s->nnz = 0;
// Dictionary data is encoded in a separate kernel
if (s->chunk.encoding_kind == DICTIONARY_V2)
{
s->strm_pos[CI_DATA2] = s->chunk.strm_len[CI_DATA2];
s->strm_pos[CI_DICTIONARY] = s->chunk.strm_len[CI_DICTIONARY];
}
}
__syncthreads();
while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0)
{
// Encode valid map
if (s->present_rows < s->chunk.num_rows)
{
uint32_t present_rows = s->present_rows;
uint32_t nrows = min(s->chunk.num_rows - present_rows, 512 * 8 - (present_rows - (min(s->cur_row, s->present_out) & ~7)));
uint32_t nrows_out;
if (t*8 < nrows)
{
uint32_t row = s->chunk.start_row + present_rows + t * 8;
uint8_t valid = 0;
if (row < s->chunk.valid_rows)
{
const uint8_t *valid_map_base = reinterpret_cast<const uint8_t *>(s->chunk.valid_map_base);
valid = (valid_map_base) ? valid_map_base[row >> 3] : 0xff;
if (row + 7 > s->chunk.valid_rows)
{
valid = valid & ((1 << (s->chunk.valid_rows & 7)) - 1);
}
}
s->valid_buf[(row >> 3) & 0x1ff] = valid;
}
__syncthreads();
present_rows += nrows;
if (!t)
{
s->present_rows = present_rows;
}
// RLE encode the present stream
nrows_out = present_rows - s->present_out; // Should always be a multiple of 8 except at the end of the last row group
if (nrows_out > ((present_rows < s->chunk.num_rows) ? 130 * 8 : 0))
{
uint32_t present_out = s->present_out;
if (s->chunk.strm_id[CI_PRESENT] >= 0)
{
uint32_t flush = (present_rows < s->chunk.num_rows) ? 0 : 7;
nrows_out = (nrows_out + flush) >> 3;
nrows_out = ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, (s->chunk.start_row + present_out) >> 3, nrows_out, flush, t) * 8;
}
__syncthreads();
if (!t)
{
s->present_out = min(present_out + nrows_out, present_rows);
}
}
__syncthreads();
}
// Fetch non-null values
if (!s->chunk.streams[CI_DATA])
{
// Pass-through
__syncthreads();
if (!t)
{
s->cur_row = s->present_rows;
s->strm_pos[CI_DATA] = s->cur_row * s->chunk.dtype_len;
}
__syncthreads();
}
else if (s->cur_row < s->present_rows)
{
uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024;
uint32_t nrows = min(min(s->present_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), 512);
uint32_t row = s->chunk.start_row + s->cur_row + t;
uint32_t valid = (t < nrows) ? (s->valid_buf[(row >> 3) & 0x1ff] >> (row & 7)) & 1 : 0;
s->buf.u32[t] = valid;
// TODO: Could use a faster reduction relying on __popc() for the initial phase
lengths_to_positions(s->buf.u32, 512, t);
__syncthreads();
if (valid)
{
int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1);
const uint8_t *base = reinterpret_cast<const uint8_t *>(s->chunk.column_data_base);
switch (s->chunk.type_kind)
{
case INT:
case DATE:
case FLOAT:
s->vals.u32[nz_idx] = reinterpret_cast<const uint32_t *>(base)[row];
break;
case DOUBLE:
case LONG:
s->vals.u64[nz_idx] = reinterpret_cast<const uint64_t *>(base)[row];
break;
case SHORT:
s->vals.u32[nz_idx] = reinterpret_cast<const uint16_t *>(base)[row];
break;
case BOOLEAN:
case BYTE:
s->vals.u8[nz_idx] = reinterpret_cast<const uint8_t *>(base)[row];
break;
case TIMESTAMP: {
int64_t ts = reinterpret_cast<const int64_t *>(base)[row];
int32_t ts_scale = kTimeScale[min(s->chunk.scale, 9)];
int64_t seconds = ts / ts_scale;
int32_t nanos = (ts - seconds * ts_scale);
if (nanos < 0)
{
seconds += 1;
nanos += ts_scale;
}
s->vals.i64[nz_idx] = seconds - kORCTimeToUTC;
if (nanos != 0)
{
// Trailing zeroes are encoded in the lower 3-bits
uint32_t zeroes = 0;
nanos *= kTimeScale[9 - min(s->chunk.scale, 9)];
if (!(nanos % 100))
{
nanos /= 100;
zeroes = 1;
while (zeroes < 7 && !(nanos % 10))
{
nanos /= 10;
zeroes++;
}
}
nanos = (nanos << 3) + zeroes;
}
s->lengths.u32[nz_idx] = nanos;
break;
}
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2)
{
uint32_t dict_idx = reinterpret_cast<const uint32_t *>(base)[row];
if (dict_idx > 0x7fffffffu)
dict_idx = reinterpret_cast<const uint32_t *>(base)[dict_idx & 0x7fffffffu];
s->vals.u32[nz_idx] = dict_idx;
}
else
{
const nvstrdesc_s *str_desc = reinterpret_cast<const nvstrdesc_s *>(base) + row;
const char *ptr = str_desc->ptr;
uint32_t count = static_cast<uint32_t>(str_desc->count);
s->u.strenc.str_data[s->buf.u32[t] - 1] = ptr;
s->lengths.u32[nz_idx] = count;
}
break;
default:
break;
}
}
__syncthreads();
if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)
{
// Store string data
uint32_t nz = s->buf.u32[511];
uint32_t nz_idx = (s->nnz + t) & 0x3ff;
uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0;
StoreStringData(s->chunk.streams[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t);
if (!t)
{
s->strm_pos[CI_DATA] += s->u.strenc.char_count;
}
__syncthreads();
}
else if (s->chunk.type_kind == BOOLEAN)
{
// bool8 -> 8x bool1
uint32_t nz = s->buf.u32[511];
uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3;
if (t < n)
{
uint32_t idx8 = (s->nnz & ~7) + (t << 3);
s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7)
| ((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6)
| ((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5)
| ((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4)
| ((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3)
| ((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2)
| ((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1)
| ((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0);
}
__syncthreads();
}
if (!t)
{
uint32_t nz = s->buf.u32[511];
s->nnz += nz;
s->numvals += nz;
s->numlengths += (s->chunk.type_kind == TIMESTAMP || (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)) ? nz : 0;
s->cur_row += nrows;
}
__syncthreads();
// Encode values
if (s->numvals > 0)
{
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 7 : 0, n;
switch (s->chunk.type_kind)
{
case SHORT:
case INT:
case DATE:
n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff>(s, s->vals.i32, s->nnz - s->numvals, s->numvals, flush, t);
break;
case LONG:
case TIMESTAMP:
n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff>(s, s->vals.i64, s->nnz - s->numvals, s->numvals, flush, t);
break;
case BYTE:
n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t);
break;
case BOOLEAN:
n = ByteRLE<CI_DATA, 0x1ff>(s, s->lengths.u8, (s->nnz - s->numvals + flush) >> 3, (s->numvals + flush) >> 3, flush, t) * 8;
break;
case FLOAT:
StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t);
n = s->numvals;
break;
case DOUBLE:
StoreBytes<CI_DATA, 0x1fff>(s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t);
n = s->numvals;
break;
case STRING:
if (s->chunk.encoding_kind == DICTIONARY_V2) {
n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff>(s, s->vals.u32, s->nnz - s->numvals, s->numvals, flush, t);
} else {
n = s->numvals;
}
break;
default:
n = s->numvals;
break;
}
__syncthreads();
if (!t)
{
s->numvals -= min(n, s->numvals);
}
}
// Encode secondary stream values
if (s->numlengths > 0)
{
uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 1 : 0, n;
switch (s->chunk.type_kind)
{
case TIMESTAMP:
case STRING:
n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff>(s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, flush, t);
break;
default:
n = s->numlengths;
break;
}
__syncthreads();
if (!t)
{
s->numlengths -= min(n, s->numlengths);
}
}
}
__syncthreads();
}
__syncthreads();
if (t <= CI_PRESENT && s->chunk.strm_id[t] >= 0)
{
// Update actual encoded stream length
chunks[group_id * num_columns + col_id].strm_len[t] = s->strm_pos[t];
if (!s->chunk.streams[t])
{
chunks[group_id * num_columns + col_id].streams[t] = reinterpret_cast<uint8_t *>(const_cast<void *>(s->chunk.column_data_base)) + s->chunk.start_row * s->chunk.dtype_len;
}
}
}
/**
* @brief Encode column dictionaries
*
* @param[in] stripes Stripe dictionaries device array [stripe][string_column]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*
**/
// blockDim {512,1,1}
extern "C" __global__ void __launch_bounds__(512)
gpuEncodeStringDictionaries(StripeDictionary *stripes, EncChunk *chunks, uint32_t num_columns)
{
__shared__ __align__(16) orcenc_state_s state_g;
orcenc_state_s * const s = &state_g;
uint32_t stripe_id = blockIdx.x;
uint32_t cid = (blockIdx.y) ? CI_DICTIONARY : CI_DATA2;
uint32_t chunk_id;
int t = threadIdx.x;
const nvstrdesc_s *str_desc;
const uint32_t *dict_data;
if (t < sizeof(StripeDictionary) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->u.dict_stripe)[t] = ((const uint32_t *)&stripes[stripe_id])[t];
}
__syncthreads();
chunk_id = s->u.dict_stripe.start_chunk * num_columns + s->u.dict_stripe.column_id;
if (t < sizeof(EncChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[chunk_id])[t];
}
if (t == 0)
{
s->strm_pos[cid] = 0;
s->numlengths = 0;
s->nrows = s->u.dict_stripe.num_strings;
s->cur_row = 0;
}
str_desc = reinterpret_cast<const nvstrdesc_s *>(s->u.dict_stripe.column_data_base);
dict_data = s->u.dict_stripe.dict_data;
__syncthreads();
if (s->chunk.encoding_kind != DICTIONARY_V2)
{
return; // This column isn't using dictionary encoding -> bail out
}
while (s->cur_row < s->nrows || s->numlengths != 0)
{
uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512));
uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0;
if (cid == CI_DICTIONARY)
{
// Encoding string contents
const char *ptr = (t < numvals) ? str_desc[string_idx].ptr : 0;
uint32_t count = (t < numvals) ? static_cast<uint32_t>(str_desc[string_idx].count) : 0;
s->u.strenc.str_data[t] = ptr;
StoreStringData(s->chunk.streams[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY], &s->u.strenc, (ptr) ? count : 0, t);
if (!t)
{
s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count;
}
}
else
{
// Encoding string lengths
uint32_t count = (t < numvals) ? static_cast<uint32_t>(str_desc[string_idx].count) : 0;
uint32_t nz_idx = (s->cur_row + t) & 0x3ff;
if (t < numvals)
s->lengths.u32[nz_idx] = count;
__syncthreads();
if (s->numlengths + numvals > 0)
{
uint32_t flush = (s->cur_row + numvals == s->nrows) ? 1 : 0;
uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff>(s, s->lengths.u32, s->cur_row, s->numlengths + numvals, flush, t);
__syncthreads();
if (!t)
{
s->numlengths += numvals;
s->numlengths -= min(n, s->numlengths);
}
}
}
if (t == 0)
{
s->cur_row += numvals;
}
__syncthreads();
}
if (t == 0)
{
chunks[chunk_id].strm_len[cid] = s->strm_pos[cid];
}
}
/**
* @brief Merge chunked column data into a single contiguous stream
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
*
**/
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuCompactOrcDataStreams(StripeStream *strm_desc, EncChunk *chunks, uint32_t num_columns)
{
__shared__ __align__(16) StripeStream ss;
__shared__ __align__(16) EncChunk ck0;
__shared__ uint8_t * volatile ck_curptr_g;
__shared__ uint32_t volatile ck_curlen_g;
uint32_t strm_id = blockIdx.x;
uint32_t ck0_id, cid;
uint32_t t = threadIdx.x;
uint8_t *dst_ptr;
if (t < sizeof(StripeStream) / sizeof(uint32_t))
{
((volatile uint32_t *)&ss)[t] = ((const uint32_t *)&strm_desc[strm_id])[t];
}
__syncthreads();
ck0_id = ss.first_chunk_id;
if (t < sizeof(EncChunk) / sizeof(uint32_t))
{
((volatile uint32_t *)&ck0)[t] = ((const uint32_t *)&chunks[ck0_id])[t];
}
__syncthreads();
cid = ss.strm_type;
dst_ptr = ck0.streams[cid] + ck0.strm_len[cid];
for (uint32_t g = 1; g < ss.num_chunks; g++)
{
uint8_t *src_ptr;
uint32_t len;
if (t == 0)
{
src_ptr = chunks[ck0_id + g * num_columns].streams[cid];
len = chunks[ck0_id + g * num_columns].strm_len[cid];
if (src_ptr != dst_ptr)
{
chunks[ck0_id + g * num_columns].streams[cid] = dst_ptr;
}
ck_curptr_g = src_ptr;
ck_curlen_g = len;
}
__syncthreads();
src_ptr = ck_curptr_g;
len = ck_curlen_g;
if (len > 0 && src_ptr != dst_ptr)
{
for (uint32_t i = 0; i < len; i += 1024)
{
uint8_t v = (i + t < len) ? src_ptr[i + t] : 0;
__syncthreads();
if (i + t < len)
{
dst_ptr[i + t] = v;
}
}
}
dst_ptr += len;
__syncthreads();
}
if (!t)
{
strm_desc[strm_id].stream_size = dst_ptr - ck0.streams[cid];
}
}
/**
* @brief Initializes compression input/output structures
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[out] comp_in Per-block compression input parameters
* @param[out] comp_out Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
*
**/
// blockDim {256,1,1}
extern "C" __global__ void __launch_bounds__(256)
gpuInitCompressionBlocks(StripeStream *strm_desc, EncChunk *chunks, gpu_inflate_input_s *comp_in, gpu_inflate_status_s *comp_out, uint8_t *compressed_bfr, uint32_t comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ uint8_t * volatile uncomp_base_g;
uint32_t strm_id = blockIdx.x;
uint32_t t = threadIdx.x;
uint32_t num_blocks;
uint8_t *src, *dst;
if (t < sizeof(StripeStream) / sizeof(uint32_t))
{
((volatile uint32_t *)&ss)[t] = ((const uint32_t *)&strm_desc[strm_id])[t];
}
__syncthreads();
if (t == 0)
{
uncomp_base_g = chunks[ss.first_chunk_id].streams[ss.strm_type];
}
__syncthreads();
src = uncomp_base_g;
dst = compressed_bfr + ss.bfr_offset;
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1;
for (uint32_t b = t; b < num_blocks; b += 256)
{
gpu_inflate_input_s *blk_in = &comp_in[ss.first_block + b];
gpu_inflate_status_s *blk_out = &comp_out[ss.first_block + b];
uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
blk_in->srcDevice = src + b * comp_blk_size;
blk_in->srcSize = blk_size;
blk_in->dstDevice = dst + b * (3 + comp_blk_size) + 3;
blk_in->dstSize = blk_size + 3;
blk_out->bytes_written = blk_size;
blk_out->status = 1;
blk_out->reserved = 0;
}
}
/**
* @brief Compacts compressed blocks into a single contiguous stream and updates the 3-byte block length fields
*
* @param[in,out] strm_desc StripeStream device array [stripe][stream]
* @param[in] comp_in Per-block compression input parameters
* @param[in] comp_out Per-block compression status
* @param[in] compressed_bfr Compression output buffer
* @param[in] comp_blk_size Compression block size
*
**/
// blockDim {1024,1,1}
extern "C" __global__ void __launch_bounds__(1024)
gpuCompactCompressedBlocks(StripeStream *strm_desc, gpu_inflate_input_s *comp_in, gpu_inflate_status_s *comp_out, uint8_t *compressed_bfr, uint32_t comp_blk_size)
{
__shared__ __align__(16) StripeStream ss;
__shared__ const uint8_t * volatile comp_src_g;
__shared__ uint32_t volatile comp_len_g;
uint32_t strm_id = blockIdx.x;
uint32_t t = threadIdx.x;
uint32_t num_blocks, b, blk_size;
const uint8_t *src;
uint8_t *dst;
if (t < sizeof(StripeStream) / sizeof(uint32_t))
{
((volatile uint32_t *)&ss)[t] = ((const uint32_t *)&strm_desc[strm_id])[t];
}
__syncthreads();
num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 0;
dst = compressed_bfr + ss.bfr_offset;
b = 0;
do
{
if (t == 0)
{
gpu_inflate_input_s *blk_in = &comp_in[ss.first_block + b];
gpu_inflate_status_s *blk_out = &comp_out[ss.first_block + b];
uint32_t src_len = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size));
uint32_t dst_len = (blk_out->status == 0) ? blk_out->bytes_written : src_len;
uint32_t blk_size24;
if (dst_len >= src_len)
{
// Copy from uncompressed source
src = reinterpret_cast<const uint8_t *>(blk_in->srcDevice);
blk_out->bytes_written = src_len;
dst_len = src_len;
blk_size24 = dst_len * 2 + 1;
}
else
{
// Compressed block
src = reinterpret_cast<const uint8_t *>(blk_in->dstDevice);
blk_size24 = dst_len * 2 + 0;
}
dst[0] = static_cast<uint8_t>(blk_size24 >> 0);
dst[1] = static_cast<uint8_t>(blk_size24 >> 8);
dst[2] = static_cast<uint8_t>(blk_size24 >> 16);
comp_src_g = src;
comp_len_g = dst_len;
}
__syncthreads();
src = comp_src_g;
blk_size = comp_len_g;
dst += 3; // skip over length written by thread0
if (src != dst)
{
for (uint32_t i = 0; i < blk_size; i += 1024)
{
uint8_t v = (i + t < blk_size) ? src[i + t] : 0;
__syncthreads();
if (i + t < blk_size)
{
dst[i + t] = v;
}
}
}
dst += blk_size;
__syncthreads();
} while (++b < num_blocks);
// Update stripe stream with the compressed size
if (t == 0)
{
strm_desc[strm_id].stream_size = static_cast<uint32_t>(dst - (compressed_bfr + ss.bfr_offset));
}
}
/**
* @brief Launches kernel for encoding column data
*
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_columns Number of columns
* @param[in] num_rowgroups Number of row groups
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t EncodeOrcColumnData(EncChunk *chunks, uint32_t num_columns, uint32_t num_rowgroups, cudaStream_t stream)
{
dim3 dim_block(512, 1); // 512 threads per chunk
dim3 dim_grid(num_columns, num_rowgroups);
gpuEncodeOrcColumnData <<< dim_grid, dim_block, 0, stream >>>(chunks, num_columns, num_rowgroups);
return cudaSuccess;
}
/**
* @brief Launches kernel for encoding column dictionaries
*
* @param[in] stripes Stripe dictionaries device array [stripe][string_column]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_string_columns Number of string columns
* @param[in] num_columns Number of columns
* @param[in] num_stripes Number of stripes
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t EncodeStripeDictionaries(StripeDictionary *stripes, EncChunk *chunks, uint32_t num_string_columns, uint32_t num_columns, uint32_t num_stripes, cudaStream_t stream)
{
dim3 dim_block(512, 1); // 512 threads per dictionary
dim3 dim_grid(num_string_columns * num_stripes, 2);
gpuEncodeStringDictionaries <<< dim_grid, dim_block, 0, stream >>>(stripes, chunks, num_columns);
return cudaSuccess;
}
/**
* @brief Launches kernel for compacting chunked column data prior to compression
*
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[in] num_stripe_streams Total number of streams
* @param[in] num_columns Number of columns
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t CompactOrcDataStreams(StripeStream *strm_desc, EncChunk *chunks, uint32_t num_stripe_streams, uint32_t num_columns, cudaStream_t stream)
{
dim3 dim_block(1024, 1);
dim3 dim_grid(num_stripe_streams, 1);
gpuCompactOrcDataStreams <<< dim_grid, dim_block, 0, stream >>>(strm_desc, chunks, num_columns);
return cudaSuccess;
}
/**
* @brief Launches kernel(s) for compressing data streams
*
* @param[in] compressed_data Output compressed blocks
* @param[in] strm_desc StripeStream device array [stripe][stream]
* @param[in] chunks EncChunk device array [rowgroup][column]
* @param[out] comp_in Per-block compression input parameters
* @param[out] comp_out Per-block compression status
* @param[in] num_stripe_streams Total number of streams
* @param[in] num_compressed_blocks Total number of compressed blocks
* @param[in] compression Type of compression
* @param[in] comp_blk_size Compression block size
* @param[in] stream CUDA stream to use, default 0
*
* @return cudaSuccess if successful, a CUDA error code otherwise
**/
cudaError_t CompressOrcDataStreams(uint8_t *compressed_data, StripeStream *strm_desc, EncChunk *chunks, gpu_inflate_input_s *comp_in,
gpu_inflate_status_s *comp_out, uint32_t num_stripe_streams, uint32_t num_compressed_blocks, CompressionKind compression, uint32_t comp_blk_size, cudaStream_t stream)
{
dim3 dim_block_init(256, 1);
dim3 dim_grid(num_stripe_streams, 1);
gpuInitCompressionBlocks <<< dim_grid, dim_block_init, 0, stream >>>(strm_desc, chunks, comp_in, comp_out, compressed_data, comp_blk_size);
if (compression == SNAPPY)
{
gpu_snap(comp_in, comp_out, num_compressed_blocks, stream);
}
dim3 dim_block_compact(1024, 1);
gpuCompactCompressedBlocks <<< dim_grid, dim_block_compact, 0, stream >>>(strm_desc, comp_in, comp_out, compressed_data, comp_blk_size);
return cudaSuccess;
}
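/**
 * Usage sketch (illustrative, not part of the original source): a host-side
 * writer would typically chain the launchers above in pipeline order on one
 * CUDA stream. All variables below are assumed to be set up by the caller.
 *
 *   EncodeOrcColumnData(chunks, num_columns, num_rowgroups, stream);
 *   EncodeStripeDictionaries(stripes, chunks, num_string_columns, num_columns, num_stripes, stream);
 *   CompactOrcDataStreams(strm_desc, chunks, num_stripe_streams, num_columns, stream);
 *   CompressOrcDataStreams(compressed_data, strm_desc, chunks, comp_in, comp_out,
 *                          num_stripe_streams, num_compressed_blocks, compression, comp_blk_size, stream);
 */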
} // namespace gpu
} // namespace orc
} // namespace io
} // namespace cudf
|
dd976dedc1b2b182f02b3bdb7db264d85b649c4a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************************/
//
// Polybench kernels implementation on CUDA GPU
//
// Computer & Information Science, University of Delaware
// Author(s): Sudhee Ayalasomayajula ([email protected])
// John Cavazos ([email protected])
// Scott Grauer Gray([email protected])
// Robert Searles ([email protected])
// Lifan Xu ([email protected])
//
// Contact(s): Lifan Xu ([email protected])
// Reference(s):
//
/*********************************************************************************/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 2.5
#define GPU_DEVICE 0
/* Problem size. */
#define TSTEPS 1
#define N 1024
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
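/* Sequential reference: one ADI (Alternating Direction Implicit) timestep
   performs a forward-elimination plus back-substitution sweep along every
   row, then the same pair of sweeps along every column. */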
void adi(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
for (int t = 0; t < TSTEPS; t++)
{
for (int i1 = 0; i1 < N; i1++)
{
for (int i2 = 1; i2 < N; i2++)
{
X[i1*N + i2] = X[i1*N + i2] - X[i1*N + (i2-1)] * A[i1*N + i2] / B[i1*N + (i2-1)];
B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[i1*N + (i2-1)];
}
}
for (int i1 = 0; i1 < N; i1++)
{
X[i1*N + (N-1)] = X[i1*N + (N-1)] / B[i1*N + (N-1)];
}
for (int i1 = 0; i1 < N; i1++)
{
for (int i2 = 0; i2 < N-2; i2++)
{
X[i1*N + (N-i2-2)] = (X[i1*N + (N-2-i2)] - X[i1*N + (N-2-i2-1)] * A[i1*N + (N-i2-3)]) / B[i1*N + (N-3-i2)];
}
}
for (int i1 = 1; i1 < N; i1++)
{
for (int i2 = 0; i2 < N; i2++)
{
X[i1*N + i2] = X[i1*N + i2] - X[(i1-1)*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2];
B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2];
}
}
for (int i2 = 0; i2 < N; i2++)
{
X[(N-1)*N + i2] = X[(N-1)*N + i2] / B[(N-1)*N + i2];
}
for (int i1 = 0; i1 < N-2; i1++)
{
for (int i2 = 0; i2 < N; i2++)
{
X[(N-2-i1)*N + i2] = (X[(N-2-i1)*N + i2] - X[(N-i1-3)*N + i2] * A[(N-3-i1)*N + i2]) / B[(N-2-i1)*N + i2];
}
}
}
}
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i, j;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
X[i*N + j] = ((DATA_TYPE) i*(j+1) + 1) / N;
A[i*N + j] = ((DATA_TYPE) (i-1)*(j+4) + 2) / N;
B[i*N + j] = ((DATA_TYPE) (i+3)*(j+7) + 3) / N;
}
}
}
void compareResults(DATA_TYPE* B_cpu, DATA_TYPE* B_fromGpu, DATA_TYPE* X_cpu, DATA_TYPE* X_fromGpu)
{
int i, j, fail;
fail = 0;
// Compare b and x output on cpu and gpu
for (i=0; i < N; i++)
{
for (j=0; j < N; j++)
{
if (percentDiff(B_cpu[i*N + j], B_fromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
//printf("1: %f\n 2: %f\n", B_cpu[i*N + j], B_fromGpu[i*N + j]);
}
}
}
for (i=0; i<N; i++)
{
for (j=0; j<(N); j++)
{
if (percentDiff(X_cpu[i*N + j], X_fromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
//printf("1: %f\n 2: %f\n", X_cpu[i*N + j], X_fromGpu[i*N + j]);
}
}
}
// Print results
printf("Number of misses: %d\n", fail);
}
void GPU_argv_init()
{
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
hipSetDevice( GPU_DEVICE );
}
__global__ void adi_kernel1(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i1 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i1 < N))
{
for (int i2 = 1; i2 < N; i2++)
{
X[i1*N + i2] = X[i1*N + i2] - X[i1*N + (i2-1)] * A[i1*N + i2] / B[i1*N + (i2-1)];
B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[i1*N + (i2-1)];
}
}
}
__global__ void adi_kernel2(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i1 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i1 < N))
{
X[i1*N + (N-1)] = X[i1*N + (N-1)] / B[i1*N + (N-1)];
}
}
__global__ void adi_kernel3(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i1 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i1 < N))
{
for (int i2 = 0; i2 < N-2; i2++)
{
X[i1*N + (N-i2-2)] = (X[i1*N + (N-2-i2)] - X[i1*N + (N-2-i2-1)] * A[i1*N + (N-i2-3)]) / B[i1*N + (N-3-i2)];
}
}
}
__global__ void adi_kernel4(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, int i1)
{
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i2 < N))
{
X[i1*N + i2] = X[i1*N + i2] - X[(i1-1)*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2];
B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2];
}
}
__global__ void adi_kernel5(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i2 < N))
{
X[(N-1)*N + i2] = X[(N-1)*N + i2] / B[(N-1)*N + i2];
}
}
__global__ void adi_kernel6(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, int i1)
{
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i2 < N))
{
X[(N-2-i1)*N + i2] = (X[(N-2-i1)*N + i2] - X[(N-i1-3)*N + i2] * A[(N-3-i1)*N + i2]) / B[(N-2-i1)*N + i2];
}
}
void adiCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, DATA_TYPE* B_outputFromGpu, DATA_TYPE* X_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE* A_gpu;
DATA_TYPE* B_gpu;
DATA_TYPE* X_gpu;
hipMalloc(&A_gpu, N * N * sizeof(DATA_TYPE));
hipMalloc(&B_gpu, N * N * sizeof(DATA_TYPE));
hipMalloc(&X_gpu, N * N * sizeof(DATA_TYPE));
hipMemcpy(A_gpu, A, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice);
hipMemcpy(B_gpu, B, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice);
hipMemcpy(X_gpu, X, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice);
dim3 block1(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y, 1);
dim3 grid1(1, 1, 1);
grid1.x = (size_t)(ceil( ((float)N) / ((float)block1.x) ));
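// e.g. with N = 1024 and block1.x = 256 this gives grid1.x = 4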
t_start = rtclock();
for (int t = 0; t < TSTEPS; t++)
{
hipLaunchKernelGGL(( adi_kernel1), dim3(grid1), dim3(block1), 0, 0, A_gpu, B_gpu, X_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( adi_kernel2), dim3(grid1), dim3(block1), 0, 0, A_gpu, B_gpu, X_gpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( adi_kernel3), dim3(grid1), dim3(block1), 0, 0, A_gpu, B_gpu, X_gpu);
hipDeviceSynchronize();
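// The column sweep couples row i1 to row i1-1, so each row gets its own
// kernel launch (parallel across the N columns) with a synchronize between
// launches to respect the dependency.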
for (int i1 = 1; i1 < N; i1++)
{
hipLaunchKernelGGL(( adi_kernel4), dim3(grid1), dim3(block1), 0, 0, A_gpu, B_gpu, X_gpu, i1);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( adi_kernel5), dim3(grid1), dim3(block1), 0, 0, A_gpu, B_gpu, X_gpu);
hipDeviceSynchronize();
for (int i1 = 0; i1 < N-2; i1++)
{
hipLaunchKernelGGL(( adi_kernel6), dim3(grid1), dim3(block1), 0, 0, A_gpu, B_gpu, X_gpu, i1);
hipDeviceSynchronize();
}
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
hipMemcpy(B_outputFromGpu, B_gpu, N * N * sizeof(DATA_TYPE), hipMemcpyDeviceToHost);
hipMemcpy(X_outputFromGpu, X_gpu, N * N * sizeof(DATA_TYPE), hipMemcpyDeviceToHost);
hipFree(A_gpu);
hipFree(B_gpu);
hipFree(X_gpu);
}
int main(int argc, char *argv[])
{
double t_start, t_end;
GPU_argv_init();
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
DATA_TYPE* X;
DATA_TYPE* X_outputFromGpu;
A = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
B_outputFromGpu = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
X = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
X_outputFromGpu = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
init_array(A, B, X);
adiCuda(A, B, X, B_outputFromGpu, X_outputFromGpu);
t_start = rtclock();
adi(A, B, X);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(B, B_outputFromGpu, X, X_outputFromGpu);
free(A);
free(B);
free(B_outputFromGpu);
free(X);
free(X_outputFromGpu);
return 0;
}
| dd976dedc1b2b182f02b3bdb7db264d85b649c4a.cu | /*********************************************************************************/
//
// Polybench kernels implementation on CUDA GPU
//
// Computer & Information Science, University of Delaware
// Author(s): Sudhee Ayalasomayajula ([email protected])
// John Cavazos ([email protected])
// Scott Grauer Gray([email protected])
// Robert Searles ([email protected])
// Lifan Xu ([email protected])
//
// Contact(s): Lifan Xu ([email protected])
// Reference(s):
//
/*********************************************************************************/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 2.5
#define GPU_DEVICE 0
/* Problem size. */
#define TSTEPS 1
#define N 1024
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
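/* Sequential reference: one ADI (Alternating Direction Implicit) timestep
   performs a forward-elimination plus back-substitution sweep along every
   row, then the same pair of sweeps along every column. */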
void adi(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
for (int t = 0; t < TSTEPS; t++)
{
for (int i1 = 0; i1 < N; i1++)
{
for (int i2 = 1; i2 < N; i2++)
{
X[i1*N + i2] = X[i1*N + i2] - X[i1*N + (i2-1)] * A[i1*N + i2] / B[i1*N + (i2-1)];
B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[i1*N + (i2-1)];
}
}
for (int i1 = 0; i1 < N; i1++)
{
X[i1*N + (N-1)] = X[i1*N + (N-1)] / B[i1*N + (N-1)];
}
for (int i1 = 0; i1 < N; i1++)
{
for (int i2 = 0; i2 < N-2; i2++)
{
X[i1*N + (N-i2-2)] = (X[i1*N + (N-2-i2)] - X[i1*N + (N-2-i2-1)] * A[i1*N + (N-i2-3)]) / B[i1*N + (N-3-i2)];
}
}
for (int i1 = 1; i1 < N; i1++)
{
for (int i2 = 0; i2 < N; i2++)
{
X[i1*N + i2] = X[i1*N + i2] - X[(i1-1)*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2];
B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2];
}
}
for (int i2 = 0; i2 < N; i2++)
{
X[(N-1)*N + i2] = X[(N-1)*N + i2] / B[(N-1)*N + i2];
}
for (int i1 = 0; i1 < N-2; i1++)
{
for (int i2 = 0; i2 < N; i2++)
{
X[(N-2-i1)*N + i2] = (X[(N-2-i1)*N + i2] - X[(N-i1-3)*N + i2] * A[(N-3-i1)*N + i2]) / B[(N-2-i1)*N + i2];
}
}
}
}
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i, j;
for (i = 0; i < N; i++)
{
for (j = 0; j < N; j++)
{
X[i*N + j] = ((DATA_TYPE) i*(j+1) + 1) / N;
A[i*N + j] = ((DATA_TYPE) (i-1)*(j+4) + 2) / N;
B[i*N + j] = ((DATA_TYPE) (i+3)*(j+7) + 3) / N;
}
}
}
void compareResults(DATA_TYPE* B_cpu, DATA_TYPE* B_fromGpu, DATA_TYPE* X_cpu, DATA_TYPE* X_fromGpu)
{
int i, j, fail;
fail = 0;
// Compare b and x output on cpu and gpu
for (i=0; i < N; i++)
{
for (j=0; j < N; j++)
{
if (percentDiff(B_cpu[i*N + j], B_fromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
//printf("1: %f\n 2: %f\n", B_cpu[i*N + j], B_fromGpu[i*N + j]);
}
}
}
for (i=0; i<N; i++)
{
for (j=0; j<(N); j++)
{
if (percentDiff(X_cpu[i*N + j], X_fromGpu[i*N + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
//printf("1: %f\n 2: %f\n", X_cpu[i*N + j], X_fromGpu[i*N + j]);
}
}
}
// Print results
printf("Number of misses: %d\n", fail);
}
void GPU_argv_init()
{
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
cudaSetDevice( GPU_DEVICE );
}
__global__ void adi_kernel1(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i1 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i1 < N))
{
for (int i2 = 1; i2 < N; i2++)
{
X[i1*N + i2] = X[i1*N + i2] - X[i1*N + (i2-1)] * A[i1*N + i2] / B[i1*N + (i2-1)];
B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[i1*N + (i2-1)];
}
}
}
__global__ void adi_kernel2(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i1 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i1 < N))
{
X[i1*N + (N-1)] = X[i1*N + (N-1)] / B[i1*N + (N-1)];
}
}
__global__ void adi_kernel3(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i1 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i1 < N))
{
for (int i2 = 0; i2 < N-2; i2++)
{
X[i1*N + (N-i2-2)] = (X[i1*N + (N-2-i2)] - X[i1*N + (N-2-i2-1)] * A[i1*N + (N-i2-3)]) / B[i1*N + (N-3-i2)];
}
}
}
__global__ void adi_kernel4(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, int i1)
{
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i2 < N))
{
X[i1*N + i2] = X[i1*N + i2] - X[(i1-1)*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2];
B[i1*N + i2] = B[i1*N + i2] - A[i1*N + i2] * A[i1*N + i2] / B[(i1-1)*N + i2];
}
}
__global__ void adi_kernel5(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X)
{
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i2 < N))
{
X[(N-1)*N + i2] = X[(N-1)*N + i2] / B[(N-1)*N + i2];
}
}
__global__ void adi_kernel6(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, int i1)
{
int i2 = blockIdx.x * blockDim.x + threadIdx.x;
if ((i2 < N))
{
X[(N-2-i1)*N + i2] = (X[(N-2-i1)*N + i2] - X[(N-i1-3)*N + i2] * A[(N-3-i1)*N + i2]) / B[(N-2-i1)*N + i2];
}
}
void adiCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* X, DATA_TYPE* B_outputFromGpu, DATA_TYPE* X_outputFromGpu)
{
double t_start, t_end;
DATA_TYPE* A_gpu;
DATA_TYPE* B_gpu;
DATA_TYPE* X_gpu;
cudaMalloc(&A_gpu, N * N * sizeof(DATA_TYPE));
cudaMalloc(&B_gpu, N * N * sizeof(DATA_TYPE));
cudaMalloc(&X_gpu, N * N * sizeof(DATA_TYPE));
cudaMemcpy(A_gpu, A, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(X_gpu, X, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
dim3 block1(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y, 1);
dim3 grid1(1, 1, 1);
grid1.x = (size_t)(ceil( ((float)N) / ((float)block1.x) ));
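// e.g. with N = 1024 and block1.x = 256 this gives grid1.x = 4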
t_start = rtclock();
for (int t = 0; t < TSTEPS; t++)
{
adi_kernel1<<<grid1, block1>>>(A_gpu, B_gpu, X_gpu);
cudaThreadSynchronize();
adi_kernel2<<<grid1, block1>>>(A_gpu, B_gpu, X_gpu);
cudaThreadSynchronize();
adi_kernel3<<<grid1, block1>>>(A_gpu, B_gpu, X_gpu);
cudaThreadSynchronize();
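// The column sweep couples row i1 to row i1-1, so each row gets its own
// kernel launch (parallel across the N columns) with a synchronize between
// launches to respect the dependency.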
for (int i1 = 1; i1 < N; i1++)
{
adi_kernel4<<<grid1, block1>>>(A_gpu, B_gpu, X_gpu, i1);
cudaThreadSynchronize();
}
adi_kernel5<<<grid1, block1>>>(A_gpu, B_gpu, X_gpu);
cudaThreadSynchronize();
for (int i1 = 0; i1 < N-2; i1++)
{
adi_kernel6<<<grid1, block1>>>(A_gpu, B_gpu, X_gpu, i1);
cudaThreadSynchronize();
}
}
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
cudaMemcpy(B_outputFromGpu, B_gpu, N * N * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost);
cudaMemcpy(X_outputFromGpu, X_gpu, N * N * sizeof(DATA_TYPE), cudaMemcpyDeviceToHost);
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(X_gpu);
}
int main(int argc, char *argv[])
{
double t_start, t_end;
GPU_argv_init();
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
DATA_TYPE* X;
DATA_TYPE* X_outputFromGpu;
A = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
B_outputFromGpu = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
X = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
X_outputFromGpu = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
init_array(A, B, X);
adiCuda(A, B, X, B_outputFromGpu, X_outputFromGpu);
t_start = rtclock();
adi(A, B, X);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
compareResults(B, B_outputFromGpu, X, X_outputFromGpu);
free(A);
free(B);
free(B_outputFromGpu);
free(X);
free(X_outputFromGpu);
return 0;
}
|
3c740d2a6999fffa57934b4794fc1f2577ffa996.hip | // !!! This is a file automatically generated by hipify!!!
/* 159.735 Semester 2, 2016. Ian Bond, 3/10/2016
Sequential and GPU versions of the N-sphere counting problem for
Assignment 5. Two alternative algorithms are presented.
Note: a rethink was needed when implementing the GPU version;
you can't just cut and paste code.
To compile the original sequential version: g++ -O3 -o nsphere nsphere.cpp
(you will get slightly better performance with the O3 optimization flag);
this HIP port builds with hipcc.
*/
#include <cstdlib>
#include <cmath>
#include <iostream>
#include <string>
#include <hip/hip_runtime.h>
#include <vector>
const long MAXDIM = 10;
const double RMIN = 2.0;
const double RMAX = 8.0;
const int MAX_POINTS_PER_THREAD = 500;
const int MAX_BPG_ONE_DIM = 1024;
const int MAX_TPB = 1024;
double diffclock(clock_t clock1, clock_t clock2)
{
double diffticks = clock1 - clock2;
double diffms = (diffticks * 1000) / CLOCKS_PER_SEC;
return diffms; // Time difference in milliseconds
}
/*
* Evaluate n**k where both are long integers
*/
long powlong(long n, long k)
{
long p = 1;
for (long i = 0; i < k; ++i) p *= n;
return p;
}
/*
* Convert a decimal number into another base system - the individual
* digits in the new base are stored in the index array.
*/
void convert(long num, long base, std::vector<long>& index)
{
const long ndim = index.size();
for (long i = 0; i < ndim; ++i) index[i] = 0;
long idx = 0;
while (num != 0) {
long rem = num % base;
num = num / base;
index[idx] = rem;
++idx;
}
}
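/* Illustrative example: with base = 5 and num = 27 the stored digits are
   index = {2, 0, 1, 0, ...} (least significant first), since
   27 = 2 + 0*5 + 1*25. */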
long count_in_v1(long ndim, double radius)
{
const long halfb = static_cast<long>(floor(radius));
const long base = 2 * halfb + 1;
const double rsquare = radius * radius;
// This is the total number of points we will need to test.
const long ntotal = powlong(base, ndim);
std::cout << "Points need to be test " << ntotal << std::endl;
long count = 0;
// Indices in x,y,z,....
std::vector<long> index(ndim, 0);
// Loop over the total number of points. On each iteration we
// convert n to its equivalent in a number system with the given "base".
for (long n = 0; n < ntotal; ++n) {
convert(n, base, index);
double rtestsq = 0;
for (long k = 0; k < ndim; ++k) {
double xk = index[k] - halfb;
rtestsq += xk * xk;
}
if (rtestsq < rsquare) ++count;
}
return count;
}
// kernel
__global__ void cuda_count(int ndim, double radius, long nfrom, long nto, long nthreads, int* counter)
{
long id = blockIdx.x * blockDim.x + threadIdx.x;
counter[id] = 0;
if (id >= nto)
return;
const long halfb = static_cast<long>(floor(radius));
const long base = 2 * halfb + 1;
const double rsquare = radius*radius;
long index = 0;
long num = nfrom + id;
// Each thread handles points nfrom+id, nfrom+id+nthreads, nfrom+id+2*nthreads, ...
// (a grid-stride pattern), so one thread may test more than one point.
while (num < nto)
{
double rtestsq = 0;
for (int i=0; i<ndim; i++)
{
long rem = num % base;
num = num / base;
double xk = rem - halfb;
rtestsq += xk * xk;
}
if (rtestsq < rsquare )
{
atomicAdd(&counter[id], 1);
}
index++;
num = nfrom + id + nthreads*index;
}
}
long count_in_cuda(long ndim, double radius)
{
const long halfb = static_cast<long>(floor(radius));
const long base = 2 * halfb + 1;
// This is the total number of points we will need to test.
const long ntotal = powlong(base, ndim);
const int tpb_x = (ntotal<MAX_TPB)?ntotal:MAX_TPB;
// use at most MAX_BPG_ONE_DIM blocks of MAX_TPB (1024) threads each
int blocks = ntotal / MAX_TPB + 1;
if (blocks > MAX_BPG_ONE_DIM)
{
blocks = MAX_BPG_ONE_DIM;
}
const long nthreads = tpb_x*blocks;
int* counters = new int[nthreads];
memset(counters, 0, sizeof(int)*nthreads);
int* d_counters;
hipMalloc(&d_counters, sizeof(int)*nthreads);
long total_count = 0;
//invoke the kernel
//std::cout << "Launching a grid of " << nthreads << " threads" << std::endl;
const long points_for_each_call = MAX_POINTS_PER_THREAD*nthreads;
long nfrom = 0;
long nto = points_for_each_call;
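// Walk the full index space in fixed-size chunks of points_for_each_call
// points per pass; presumably this bounds the work done by each kernel launch.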
do
{
if (nto > ntotal)
nto = ntotal;
//std::cout << "will handle [" << nfrom << ", " << nto << "]\n";
hipLaunchKernelGGL(( cuda_count) , dim3(blocks), dim3(tpb_x), 0, 0, ndim, radius, nfrom, nto, nthreads, d_counters);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
{
std::cout << "CUDA kernel error:\n"<<hipGetErrorString(err)<<std::endl;
break;
}
//copy the counters to host
hipMemcpy(counters, d_counters, sizeof(int)*nthreads, hipMemcpyDeviceToHost);
//sum all counters
for (long i = 0; i < nthreads; i++)
{
total_count += counters[i];
}
nfrom = nto;
nto += points_for_each_call;
}while (nfrom < ntotal);
hipFree(d_counters);
delete[] counters;
return total_count;
}
int main(int argc, char* argv[])
{
// You can make this larger if you want
const long ntrials = 20;
std::cout <<"r nd Seq Count Seq Time cuda Count tcuda Time"<<std::endl;
for (long n = 0; n < ntrials; ++n)
{
// Get a random value for the hypersphere radius between the two limits
const double r = drand48() * (RMAX - RMIN) + RMIN;
// Get a random value for the number of dimensions between 1 and MAXDIM inclusive
const long nd = lrand48() % (MAXDIM - 1) + 1;
clock_t tstart = clock();
const long count_s = count_in_v1(nd, r);
double ts = diffclock(clock(), tstart);
//std::cout << "Counted by sequential is "<< count_s << std::endl;
tstart = clock();
const long count_cuda = count_in_cuda(nd, r);
double tp = diffclock(clock(), tstart);
//std::cout << "Counted by CUDA is " << count_cuda << std::endl<<std::endl;
std::cout << r << "\t " << nd << "\t" << count_s << "\t" << ts <<"\t"<< count_cuda << "\t"<< tp <<std::endl;
}
}
| 3c740d2a6999fffa57934b4794fc1f2577ffa996.cu | /* 159.735 Semester 2, 2016. Ian Bond, 3/10/2016
Sequential and GPU versions of the N-sphere counting problem for
Assignment 5. Two alternative algorithms are presented.
Note: a rethink was needed when implementing the GPU version;
you can't just cut and paste code.
To compile the original sequential version: g++ -O3 -o nsphere nsphere.cpp
(you will get slightly better performance with the O3 optimization flag);
this CUDA version builds with nvcc.
*/
#include <cstdlib>
#include <cmath>
#include <iostream>
#include <string>
#include <cuda.h>
#include <vector>
const long MAXDIM = 10;
const double RMIN = 2.0;
const double RMAX = 8.0;
const int MAX_POINTS_PER_THREAD = 500;
const int MAX_BPG_ONE_DIM = 1024;
const int MAX_TPB = 1024;
double diffclock(clock_t clock1, clock_t clock2)
{
double diffticks = clock1 - clock2;
double diffms = (diffticks * 1000) / CLOCKS_PER_SEC;
return diffms; // Time difference in milliseconds
}
/*
* Evaluate n**k where both are long integers
*/
long powlong(long n, long k)
{
long p = 1;
for (long i = 0; i < k; ++i) p *= n;
return p;
}
/*
* Convert a decimal number into another base system - the individual
* digits in the new base are stored in the index array.
*/
void convert(long num, long base, std::vector<long>& index)
{
const long ndim = index.size();
for (long i = 0; i < ndim; ++i) index[i] = 0;
long idx = 0;
while (num != 0) {
long rem = num % base;
num = num / base;
index[idx] = rem;
++idx;
}
}
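/* Illustrative example: with base = 5 and num = 27 the stored digits are
   index = {2, 0, 1, 0, ...} (least significant first), since
   27 = 2 + 0*5 + 1*25. */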
long count_in_v1(long ndim, double radius)
{
const long halfb = static_cast<long>(floor(radius));
const long base = 2 * halfb + 1;
const double rsquare = radius * radius;
// This is the total number of points we will need to test.
const long ntotal = powlong(base, ndim);
std::cout << "Points need to be test " << ntotal << std::endl;
long count = 0;
// Indices in x,y,z,....
std::vector<long> index(ndim, 0);
// Loop over the total number of points. On each iteration we
// convert n to its equivalent in a number system with the given "base".
for (long n = 0; n < ntotal; ++n) {
convert(n, base, index);
double rtestsq = 0;
for (long k = 0; k < ndim; ++k) {
double xk = index[k] - halfb;
rtestsq += xk * xk;
}
if (rtestsq < rsquare) ++count;
}
return count;
}
// kernel
__global__ void cuda_count(int ndim, double radius, long nfrom, long nto, long nthreads, int* counter)
{
long id = blockIdx.x * blockDim.x + threadIdx.x;
counter[id] = 0;
if (id >= nto)
return;
const long halfb = static_cast<long>(floor(radius));
const long base = 2 * halfb + 1;
const double rsquare = radius*radius;
long index = 0;
long num = nfrom + id;
// Each thread handles points nfrom+id, nfrom+id+nthreads, nfrom+id+2*nthreads, ...
// (a grid-stride pattern), so one thread may test more than one point.
while (num < nto)
{
double rtestsq = 0;
for (int i=0; i<ndim; i++)
{
long rem = num % base;
num = num / base;
double xk = rem - halfb;
rtestsq += xk * xk;
}
if (rtestsq < rsquare )
{
atomicAdd(&counter[id], 1);
}
index++;
num = nfrom + id + nthreads*index;
}
}
long count_in_cuda(long ndim, double radius)
{
const long halfb = static_cast<long>(floor(radius));
const long base = 2 * halfb + 1;
// This is the total number of points we will need to test.
const long ntotal = powlong(base, ndim);
const int tpb_x = (ntotal<MAX_TPB)?ntotal:MAX_TPB;
// use at most MAX_BPG_ONE_DIM blocks of MAX_TPB (1024) threads each
int blocks = ntotal / MAX_TPB + 1;
if (blocks > MAX_BPG_ONE_DIM)
{
blocks = MAX_BPG_ONE_DIM;
}
const long nthreads = tpb_x*blocks;
int* counters = new int[nthreads];
memset(counters, 0, sizeof(int)*nthreads);
int* d_counters;
cudaMalloc(&d_counters, sizeof(int)*nthreads);
long total_count = 0;
//invoke the kernel
//std::cout << "Launching a grid of " << nthreads << " threads" << std::endl;
const long points_for_each_call = MAX_POINTS_PER_THREAD*nthreads;
long nfrom = 0;
long nto = points_for_each_call;
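// Walk the full index space in fixed-size chunks of points_for_each_call
// points per pass; presumably this bounds the work done by each kernel launch.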
do
{
if (nto > ntotal)
nto = ntotal;
//std::cout << "will handle [" << nfrom << ", " << nto << "]\n";
cuda_count <<<blocks, tpb_x>>>(ndim, radius, nfrom, nto, nthreads, d_counters);
cudaError err = cudaGetLastError();
if (err != cudaSuccess)
{
std::cout << "CUDA kernel error:\n"<<cudaGetErrorString(err)<<std::endl;
break;
}
//copy the counters to host
cudaMemcpy(counters, d_counters, sizeof(int)*nthreads, cudaMemcpyDeviceToHost);
//sum all counters
for (long i = 0; i < nthreads; i++)
{
total_count += counters[i];
}
nfrom = nto;
nto += points_for_each_call;
}while (nfrom < ntotal);
cudaFree(d_counters);
delete[] counters;
return total_count;
}
int main(int argc, char* argv[])
{
// You can make this larger if you want
const long ntrials = 20;
std::cout <<"r nd Seq Count Seq Time cuda Count tcuda Time"<<std::endl;
for (long n = 0; n < ntrials; ++n)
{
// Get a random value for the hypersphere radius between the two limits
const double r = drand48() * (RMAX - RMIN) + RMIN;
// Get a random value for the number of dimensions between 1 and MAXDIM inclusive
const long nd = lrand48() % (MAXDIM - 1) + 1;
clock_t tstart = clock();
const long count_s = count_in_v1(nd, r);
double ts = diffclock(clock(), tstart);
//std::cout << "Counted by sequential is "<< count_s << std::endl;
tstart = clock();
const long count_cuda = count_in_cuda(nd, r);
double tp = diffclock(clock(), tstart);
//std::cout << "Counted by CUDA is " << count_cuda << std::endl<<std::endl;
std::cout << r << "\t " << nd << "\t" << count_s << "\t" << ts <<"\t"<< count_cuda << "\t"<< tp <<std::endl;
}
}
|
3335e8ea3b53a3b302237457ace031290fc45f69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include"CudaHelper.cuh"
#include"CudaInterface.hpp"
typedef unsigned char uchar;
#define RELU(x) fmaxf(x, 0.0f)
#define L2 0
#define L3 1
#define L4 2
#define L5 3
#define L6 4
#define L7 5
#define L8 6
#define L9 7
#define CHANNEL1TO8(n, Level) \
tl * HDNL##Level##kernelsL1[n * 9 + 0] + tc * HDNL##Level##kernelsL1[n * 9 + 1] + tr * HDNL##Level##kernelsL1[n * 9 + 2] + \
ml * HDNL##Level##kernelsL1[n * 9 + 3] + mc * HDNL##Level##kernelsL1[n * 9 + 4] + mr * HDNL##Level##kernelsL1[n * 9 + 5] + \
bl * HDNL##Level##kernelsL1[n * 9 + 6] + bc * HDNL##Level##kernelsL1[n * 9 + 7] + br * HDNL##Level##kernelsL1[n * 9 + 8] + HDNL##Level##biasL1[n]
#define CHANNEL8TO8(n, Level) \
tl1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 0] + tc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 1] + tr1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 2] + \
ml1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 3] + mc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 4] + mr1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 5] + \
bl1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 6] + bc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 7] + br1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 8] + \
tl1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 0] + tc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 1] + tr1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 2] + \
ml1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 3] + mc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 4] + mr1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 5] + \
bl1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 6] + bc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 7] + br1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 8] + \
tl1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 0] + tc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 1] + tr1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 2] + \
ml1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 3] + mc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 4] + mr1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 5] + \
bl1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 6] + bc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 7] + br1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 8] + \
tl1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 0] + tc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 1] + tr1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 2] + \
ml1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 3] + mc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 4] + mr1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 5] + \
bl1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 6] + bc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 7] + br1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 8] + \
tl2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 0] + tc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 1] + tr2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 2] + \
ml2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 3] + mc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 4] + mr2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 5] + \
bl2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 6] + bc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 7] + br2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 8] + \
tl2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 0] + tc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 1] + tr2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 2] + \
ml2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 3] + mc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 4] + mr2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 5] + \
bl2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 6] + bc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 7] + br2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 8] + \
tl2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 0] + tc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 1] + tr2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 2] + \
ml2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 3] + mc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 4] + mr2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 5] + \
bl2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 6] + bc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 7] + br2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 8] + \
tl2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 0] + tc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 1] + tr2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 2] + \
ml2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 3] + mc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 4] + mr2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 5] + \
bl2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 6] + bc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 7] + br2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 8] + HDNL##Level##biasL[L][n]
#define RUNKERNEL(Level) \
conv1To8HDNL##Level << <dimGrid, dimBlock >> > (inTex, surf1, param->orgW, param->orgH); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf1, surf2, param->orgW, param->orgH, L2); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf2, surf1, param->orgW, param->orgH, L3); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf1, surf2, param->orgW, param->orgH, L4); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf2, surf1, param->orgW, param->orgH, L5); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf1, surf2, param->orgW, param->orgH, L6); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf2, surf1, param->orgW, param->orgH, L7); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf1, surf2, param->orgW, param->orgH, L8); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf2, surf1, param->orgW, param->orgH, L9); \
convTranspose8To1HDNL##Level << <dimGridout, dimBlock >> > (surf1, outSurf, W, H);
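// Reading guide (added commentary): CHANNEL1TO8 accumulates a 3x3 convolution
// from the single input plane into feature map n of 8 (the 8 maps travel as
// two float4 values); CHANNEL8TO8 convolves all 8 maps into output map n using
// layer L's 8*8*9 weights plus a bias. RUNKERNEL chains the 1->8 layer, eight
// 8->8 layers (L2..L9), and a transposed 8->1 layer writing the W x H output.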
inline __device__ float clamp(float f, float a, float b)
{
return fmaxf(a, fminf(f, b));
}
static __device__ __constant__ const float HDNL0kernelsL1[9 * 8] =
{
0.0609, 0.1027, -0.0447,
-0.1423, 0.7196, 0.1803,
0.0842, 0.0696, 0.0082,
0.0089, 0.1540, -0.8589,
0.0448, 0.8659, -0.2420,
-0.0364, 0.0585, 0.0125,
-0.1937, 0.7259, 0.0119,
-0.8266, 0.4147, 0.0088,
-0.0453, -0.0451, -0.0182,
0.0264, -0.9422, 0.1258,
-0.0543, 0.1282, 0.7102,
-0.0106, 0.0386, -0.0141,
0.2054, -0.0393, 0.1494,
0.3106, 0.5722, 0.2640,
0.1708, -0.1640, -0.0212,
0.0558, -0.2887, -0.1666,
0.3123, -0.3097, -0.2281,
0.2880, 0.3001, 0.0526,
-0.0320, 0.0584, -0.0193,
-0.0135, 1.0649, -0.1246,
0.0283, -0.3030, -0.6378,
-0.0040, -0.9122, 0.0181,
0.0365, 0.8947, -0.0420,
-0.0199, 0.0217, 0.0060
};
static __device__ __constant__ const float HDNL0biasL1[8] =
{
-0.7577, -0.0210, 0.0292, -0.0189, 0.0223, 0.0340, 0.0150, -0.0044
};
static __device__ __constant__ const float HDNL0kernelsL[8][9 * 8 * 8] =
{
{
2.0611e-01, 6.6865e-02, -9.9123e-02,
8.5279e-02, -4.5549e-02, -2.9491e-02,
-1.0358e-01, -2.4844e-02, -8.1539e-03,
-1.1308e-01, -6.4228e-02, -8.8081e-02,
2.7810e-02, -1.6054e-01, -1.1985e-01,
-2.8679e-01, -1.7785e-02, 1.1559e-01,
2.1614e-02, -6.8870e-02, -2.4707e-01,
9.6867e-02, -1.6561e-01, 2.8281e-02,
-8.2469e-02, -9.8554e-02, -1.7147e-02,
3.3710e-01, 9.2126e-02, 3.6880e-02,
5.7004e-02, 4.0175e-02, 1.6116e-01,
2.5629e-01, 5.1154e-01, 2.4119e-02,
1.9495e-02, 2.6940e-01, -1.4050e-01,
5.0325e-02, -4.5920e-02, -1.3586e-01,
5.9458e-02, 1.3860e-01, -2.1065e-01,
-1.0744e-01, -1.5915e-01, -1.1528e-02,
-1.1470e-01, 6.3455e-02, -5.5558e-02,
-6.9920e-02, -3.0142e-02, -4.9059e-02,
3.6421e-01, 3.0252e-01, -1.3562e-01,
1.5238e-01, -1.9868e-01, -3.2644e-02,
-4.2849e-02, 1.3677e-02, 7.3854e-02,
7.6609e-02, -1.0121e-01, 3.6319e-02,
9.3536e-02, 6.0386e-02, 1.0086e-01,
-2.6630e-01, 2.5875e-02, -1.9225e-01,
4.0687e-02, 1.1005e-01, 9.9578e-03,
1.6939e-01, 5.0872e-01, 8.9876e-02,
6.9561e-02, 1.1910e-01, -1.8091e-02,
-3.5739e-02, -7.5300e-02, -1.6788e-02,
3.0316e-02, 1.5942e-01, -9.0878e-02,
-6.3737e-02, 2.6141e-02, 8.8040e-03,
3.4954e-03, -6.6707e-02, 1.4551e-01,
7.6258e-02, 1.4893e-01, -1.5255e-01,
6.2442e-02, 2.2166e-01, 7.5327e-02,
5.4785e-02, -1.4503e-02, -1.5188e-03,
1.6748e-01, -5.2731e-03, -1.9900e-02,
4.4786e-02, -1.0669e-01, 1.3192e-01,
1.9961e-02, -8.1015e-02, -3.2264e-02,
1.0544e-01, 1.8844e-01, 7.4274e-03,
6.6729e-02, -7.8318e-02, 3.0775e-02,
-8.6109e-03, 7.4977e-02, 9.4079e-02,
-1.2726e-01, -2.9664e-01, 7.8153e-03,
-4.8413e-02, -1.8450e-01, -7.1065e-02,
-8.7609e-02, -7.7192e-02, 5.0919e-02,
-1.4021e-01, 3.5696e-01, 1.2079e-02,
-2.0318e-02, -1.8827e-02, 3.9084e-02,
-2.8654e-02, -6.4166e-02, 5.4889e-02,
8.2689e-02, 8.4463e-02, 2.2339e-02,
1.0805e-01, -1.2566e-01, 1.7109e-01,
-6.1338e-02, -3.4043e-02, 4.0473e-02,
6.3821e-02, 1.7626e-01, -5.8112e-02,
-9.5002e-02, 1.3327e-02, 1.2242e-01,
4.9008e-02, -4.3678e-02, 2.2362e-02,
-7.7903e-02, -3.8252e-02, -5.2271e-02,
-1.8884e-02, -1.2859e-01, 4.1172e-02,
-3.1181e-02, 3.2348e-02, -4.9081e-02,
-6.7966e-02, -2.4896e-02, -6.5323e-02,
8.0742e-02, 2.6093e-01, -2.4638e-01,
-8.0881e-02, -2.9643e-02, -7.9627e-02,
1.4020e-01, 2.1575e-01, 8.1244e-03,
2.1561e-01, -2.9305e-01, -2.5535e-02,
-8.5538e-02, -1.4456e-01, -7.5664e-02,
-3.9921e-02, 4.0659e-02, 1.7812e-01,
1.1580e-01, 5.6628e-02, 9.0008e-02,
-2.2384e-02, -1.9788e-02, -4.0547e-02,
1.0070e-01, 2.9581e-01, 1.9936e-01,
-1.1957e-01, -8.6508e-02, -8.2543e-04,
-5.2879e-02, 1.5486e-01, 1.0829e-02,
1.4716e-01, 3.4257e-01, -3.2058e-03,
-2.1687e-02, 5.8641e-02, -6.3806e-02,
-3.2607e-02, 7.3328e-02, -6.4738e-03,
-1.0031e-01, -1.7698e-01, -9.4201e-02,
-3.3644e-02, -3.5860e-01, -9.3200e-02,
-7.4142e-02, 5.5001e-02, 4.3741e-02,
-2.2447e-03, 1.1941e-01, -1.6135e-02,
-1.4764e-02, -1.0194e-02, 3.2540e-02,
-1.0588e-01, -2.3000e-01, -1.1557e-02,
-9.0254e-02, 2.3352e-01, -1.3622e-01,
-1.9256e-03, -5.3372e-02, 1.0314e-01,
-2.0100e-02, 1.0700e-01, 1.6108e-01,
2.8422e-02, 2.7909e-01, 3.8342e-01,
1.4025e-02, 9.0965e-02, 2.0218e-01,
3.3562e-03, 7.6652e-02, 4.5974e-02,
-1.3617e-02, -1.4014e-01, -1.9253e-02,
1.1020e-01, -1.9678e-01, 6.7123e-02,
-3.3294e-02, -1.3006e-01, -1.0111e-01,
5.5813e-02, 2.1127e-01, 2.0248e-02,
-9.6386e-04, -5.2497e-03, 1.1134e-01,
2.8910e-02, 1.2229e-01, 1.8439e-01,
1.6413e-02, 1.5870e-01, -1.1616e-01,
-1.6032e-03, -6.8258e-03, -2.1883e-02,
1.2052e-01, -2.1982e-02, -1.3088e-01,
2.8664e-02, -5.0670e-02, 2.2927e-01,
2.0461e-02, 7.7250e-03, -2.6630e-02,
-9.0406e-02, -1.4174e-01, 9.8969e-02,
-6.6573e-02, -2.4425e-01, -3.5126e-02,
9.3859e-02, 1.9058e-01, -1.6569e-01,
-4.9163e-03, 7.4149e-02, 6.3345e-02,
-1.7888e-02, -9.1876e-02, 1.3728e-01,
-9.6098e-02, -3.4814e-02, -1.0862e-02,
4.8031e-03, 2.5206e-01, 8.0316e-02,
1.5102e-01, 4.1236e-02, 2.2339e-01,
2.8500e-01, 1.5106e-01, 9.6321e-04,
-6.0741e-02, 3.5759e-02, -1.8829e-01,
-1.1295e-03, -6.2322e-02, 8.4974e-01,
-3.9817e-02, -2.0666e-01, 2.2961e-01,
3.6857e-02, -2.0211e-02, -9.3342e-02,
2.0827e-02, 6.8874e-02, -6.0287e-02,
-6.9724e-02, 1.4423e-01, -7.6017e-02,
1.4718e-02, 1.8990e-01, 1.1789e-01,
-1.5018e-01, -2.3071e-01, 1.7511e-01,
-7.7605e-02, 5.0621e-02, -1.0381e-01,
8.6845e-02, -1.2410e-01, -4.4669e-01,
2.7930e-02, -5.4713e-02, -7.7923e-02,
8.6000e-02, -2.6371e-02, -8.6541e-02,
-1.1521e-01, 1.4389e-01, 5.0507e-02,
-1.6618e-02, -2.5150e-01, -4.9759e-02,
7.7166e-02, 4.5033e-03, -5.4649e-02,
2.8548e-03, -2.8078e-03, 8.1129e-02,
-4.5973e-02, 3.6740e-03, 2.0746e-01,
-9.8191e-02, 1.2807e-01, 8.1950e-03,
1.4240e-01, 1.5104e-01, 6.9624e-02,
2.2309e-01, 2.5688e-01, 9.4766e-02,
6.2560e-02, 7.1347e-02, 4.1432e-02,
-3.1829e-02, 1.5207e-01, 2.0575e-02,
-1.2506e-01, 2.9274e-01, 9.4712e-02,
-2.0520e-01, 4.9894e-04, 5.6171e-02,
-4.1567e-03, 6.6753e-02, -1.5767e-01,
6.3768e-02, 8.3008e-02, -3.5639e-01,
4.4660e-02, 2.6996e-01, -6.4014e-02,
8.5475e-02, 1.7854e-02, -6.4079e-02,
1.8760e-01, 1.5285e-01, -3.5614e-02,
1.0747e-02, -3.1330e-01, -4.8664e-02,
7.2150e-02, 1.7570e-01, 1.6716e-01,
6.2431e-02, 2.3755e-01, 2.8554e-01,
3.5791e-02, 2.8185e-01, 1.5810e-01,
-4.0886e-02, 1.8833e-02, -8.2903e-03,
1.3994e-02, -1.0846e-01, 3.5315e-02,
-6.2674e-02, 6.2806e-02, 2.2168e-02,
-3.6236e-01, -2.5326e-01, 5.6331e-02,
9.8762e-02, 3.8049e-01, 5.9885e-02,
-3.0541e-02, 7.9855e-02, -5.8639e-02,
1.1104e-03, 1.7147e-02, 3.3115e-02,
-3.3663e-02, 7.4615e-02, 6.4211e-02,
-7.3441e-02, -1.5568e-01, 7.6546e-02,
6.1802e-02, -1.5300e-01, -1.8209e-02,
-9.2786e-03, 1.6622e-01, 1.1354e-01,
9.5865e-03, -2.4226e-02, -1.4750e-03,
-5.5294e-02, -1.1839e-01, 3.8867e-03,
1.7262e-01, 4.2743e-01, 6.8970e-02,
-2.0232e-01, -1.4564e-01, 2.3025e-02,
-2.6139e-03, -1.6907e-02, 1.1693e-01,
-9.4871e-03, 3.8488e-02, -4.8351e-02,
-9.2171e-02, 4.8227e-02, 9.7378e-02,
-1.0292e-01, -1.2084e-01, -9.6676e-02,
1.8103e-02, 3.0658e-01, -7.7755e-02,
-2.4362e-02, -1.9862e-01, -6.9665e-02,
8.2944e-03, -1.4680e-01, -1.7371e-02,
-1.6534e-01, 2.5752e-01, 1.1129e-01,
-9.4151e-02, -1.3225e-01, 1.5933e-01,
9.0723e-02, 5.5469e-02, -1.4091e-01,
8.3404e-02, 1.3741e-01, -3.5438e-02,
3.2681e-02, 2.8491e-02, 1.4278e-02,
2.3789e-01, -2.3687e-03, -5.3264e-03,
-1.1161e-01, 1.9351e-02, 5.0832e-02,
8.2246e-03, 2.9892e-02, -3.7197e-02,
4.8236e-02, 1.6945e-01, 1.3673e-01,
1.1236e-01, 7.2318e-01, -4.1618e-02,
2.7494e-01, 1.0081e-01, -8.5399e-03,
-5.6151e-02, 8.1212e-02, -7.5770e-02,
2.7872e-02, 9.4644e-02, 1.1175e-02,
-6.1539e-02, 7.7395e-02, -3.2495e-02,
-5.1640e-02, 2.1028e-03, 1.5825e-02,
-1.1004e-01, 2.3153e-01, -6.1653e-02,
-2.6497e-02, 5.9461e-01, 4.0865e-02,
-1.9956e-02, 7.9328e-02, -1.7002e-02,
-5.5930e-03, 5.2015e-02, 7.7945e-04,
1.0136e-02, -9.0111e-02, -1.1175e-01,
-3.1781e-02, 1.4686e-01, -7.5718e-03,
1.1036e-02, 2.4618e-01, 8.5951e-02,
3.4775e-02, -1.2184e-01, 1.8010e-01,
-3.6781e-02, -1.3912e-01, -4.9172e-02,
3.3064e-02, 5.0582e-01, 1.0713e-02,
-1.2934e-02, -1.7697e-01, -1.4954e-01,
2.2229e-02, -5.8568e-03, -5.0186e-02,
1.9648e-02, -1.1302e-01, 1.5629e-02,
-3.5015e-02, 9.5032e-02, -2.9677e-02,
9.5173e-02, -3.0330e-02, -3.7652e-02,
-2.6097e-03, 7.4723e-01, -7.6234e-03,
-3.8826e-02, 1.0191e-01, 3.6589e-03,
-2.6503e-02, -1.1133e-01, -2.2029e-02,
-1.9101e-01, -2.1108e-01, -7.4371e-02,
-7.9349e-02, -1.0405e-01, 5.0315e-02
}
,
{
-4.2606e-02, -8.9001e-02, -6.4006e-02,
1.1132e-01, 7.6609e-02, 8.6417e-02,
7.6477e-03, -1.6416e-02, -8.2094e-02,
1.0779e-01, 2.1837e-01, 1.8094e-01,
-2.6306e-02, -1.2452e-01, 1.2662e-02,
3.1633e-02, 1.8717e-02, 3.1043e-02,
4.0927e-02, 5.0311e-02, 1.1648e-01,
2.2429e-01, 2.0757e-01, 4.3662e-03,
3.6341e-02, -4.7637e-02, 8.3645e-02,
-8.9260e-03, 1.8507e-02, 7.9069e-02,
-1.9411e-01, -8.6847e-02, -3.6639e-03,
4.0328e-02, -3.6821e-02, -8.5387e-02,
5.8173e-02, 5.9991e-02, -3.1398e-02,
1.5818e-01, 3.0861e-01, -2.3818e-02,
1.2176e-01, 6.7520e-02, 8.9401e-02,
-2.8859e-02, -1.2237e-01, -1.0625e-01,
3.1675e-02, 1.4172e-01, -1.4373e-01,
1.4653e-02, 1.0205e-01, 6.2557e-02,
-8.7292e-02, -2.1255e-02, 3.6830e-02,
-5.4417e-02, 3.0501e-01, 1.6897e-01,
-2.2187e-02, -8.9609e-02, -2.2830e-02,
4.9846e-02, 3.3395e-01, -3.1561e-02,
-1.3191e-02, 4.2663e-01, -6.9727e-02,
1.4570e-02, -4.0002e-02, 5.6394e-02,
-8.2547e-02, 1.9249e-01, 1.5591e-01,
1.4536e-01, -1.0409e-01, 1.2382e-01,
1.8189e-01, 9.2917e-02, -1.4394e-01,
-5.6260e-02, -2.7043e-01, 1.5392e-02,
-1.4305e-02, 1.1131e-01, -8.5913e-02,
7.7914e-02, -6.5484e-03, -1.8375e-01,
-1.4059e-01, -5.7339e-01, -3.9073e-02,
-1.1701e-01, -3.1806e-02, 7.7726e-02,
2.1688e-02, 9.9297e-02, 3.8224e-02,
7.9884e-02, 5.2461e-02, 1.0318e-01,
4.0054e-02, 1.4695e-01, 1.2577e-01,
-1.8790e-03, -4.9421e-02, 2.3235e-02,
-8.9820e-02, -1.6994e-01, -1.5986e-01,
2.3436e-01, -1.5346e-01, 1.5014e-02,
-3.9139e-02, -7.9388e-02, -4.9057e-02,
-1.1193e-01, -2.5705e-01, 1.1995e-01,
5.7929e-02, 2.4988e-01, -4.9406e-03,
-3.9363e-02, -1.1691e-02, -1.2236e-03,
-2.0521e-01, 2.1901e-01, 1.5957e-01,
2.1062e-01, -1.4157e-01, -3.4340e-01,
3.8520e-02, -2.0820e-01, 2.4570e-03,
1.7211e-01, 2.0214e-01, 1.3821e-01,
-7.1520e-02, 1.4847e-01, -1.3820e-01,
-2.4712e-02, -1.5925e-02, 1.7403e-02,
-3.7515e-02, 3.0461e-02, -2.7543e-02,
8.6148e-02, -6.1486e-02, 1.2610e-02,
2.9748e-03, 1.1778e-01, 2.9032e-02,
-2.1706e-02, -2.2406e-02, 2.6769e-02,
-3.6965e-02, 2.2180e-01, -4.0929e-02,
-3.2629e-03, 8.3419e-02, -1.4587e-01,
-1.3909e-02, -2.0166e-02, -1.0029e-01,
7.6360e-02, 8.0819e-02, -1.0933e-01,
-5.8919e-02, 2.4745e-02, 3.7375e-02,
-1.1333e-02, 1.4747e-02, -7.8958e-02,
-3.1535e-02, 1.7403e-01, 1.3946e-02,
-3.2038e-02, 5.1151e-02, -6.1063e-02,
-8.6472e-03, -6.9689e-02, 5.6846e-03,
5.7914e-02, -1.9818e-01, -7.5321e-02,
8.7453e-02, 7.8354e-02, 2.1997e-02,
-4.7606e-02, 1.3915e-01, 1.1653e-01,
9.6050e-02, 4.0099e-01, 1.5631e-01,
3.1492e-02, 2.4797e-01, 6.8716e-02,
-6.2664e-03, 9.1754e-02, -5.7244e-03,
1.3538e-01, 1.5366e-01, 9.4916e-02,
-4.2115e-02, -3.6585e-01, -1.4559e-01,
9.1550e-02, -5.4007e-02, 6.7482e-02,
-1.8687e-01, 3.2120e-01, 5.1031e-03,
-6.1205e-02, -5.1780e-02, 1.6442e-02,
-1.2316e-02, -1.3907e-01, -1.4446e-01,
-2.7899e-01, -8.5969e-02, -1.0870e-01,
-2.6157e-01, 8.9532e-02, 3.0958e-02,
-1.5393e-01, -4.2781e-02, -2.0951e-01,
2.0328e-01, 4.5317e-01, -3.0467e-02,
-6.1346e-02, 1.0381e-01, -1.3719e-01,
-9.8572e-02, -1.4035e-01, -1.9431e-02,
2.5542e-02, 3.2609e-01, 1.7983e-03,
-1.0800e-01, -2.9022e-02, 6.2691e-03,
2.8937e-02, -1.3483e-01, -4.1655e-02,
2.0172e-01, 1.4283e-02, 9.6200e-02,
1.9027e-02, 3.1240e-01, -2.9553e-02,
6.2776e-02, 1.3845e-01, 4.5834e-02,
-2.3854e-01, -4.0267e-02, 1.5634e-02,
-1.9246e-01, -3.2332e-02, 3.2442e-03,
-6.1880e-02, -8.8192e-02, -6.0172e-02,
2.5002e-01, 1.5148e-01, 6.4459e-02,
-2.1022e-01, -8.3893e-02, 6.9554e-03,
7.0244e-02, -2.9551e-02, 1.6481e-02,
-3.1036e-02, -2.0026e-01, -8.4748e-02,
-1.3108e-01, -1.3784e-01, 9.4900e-02,
-2.1256e-01, -4.1767e-02, 8.4665e-02,
-4.0235e-01, 1.0604e-01, -3.1827e-02,
-4.9825e-02, -9.1267e-04, 1.5527e-02,
-6.5729e-03, -1.8932e-02, -3.4591e-02,
1.1066e-01, 9.3979e-02, 2.6059e-02,
-1.2395e-01, -2.4768e-01, -1.6304e-01,
8.8329e-03, -2.1606e-02, -4.0878e-02,
-1.5581e-02, -1.4829e-02, -1.5959e-02,
-1.0463e-04, -4.2903e-03, -4.6657e-02,
2.2995e-02, 1.7917e-02, -9.1404e-02,
-1.2326e-01, 1.4582e-01, -7.0959e-02,
-1.8058e-02, -8.5228e-02, 4.2799e-02,
-2.2829e-03, 8.6577e-02, -1.1909e-01,
-1.8061e-01, 1.1166e-01, -8.2255e-02,
-1.3190e-01, 7.7123e-02, 2.3224e-02,
1.8661e-02, 2.4461e-02, 3.6060e-02,
-4.5224e-02, -1.7672e-01, 1.6080e-01,
-4.2175e-01, -2.2557e-01, -1.0719e-01,
-2.9506e-02, 9.5020e-02, -6.6465e-02,
-7.2627e-02, 3.1236e-01, 5.5764e-02,
-2.8789e-01, -1.8915e-01, 9.0825e-02,
-5.8618e-02, 6.4082e-02, 4.8461e-03,
-5.9405e-02, 3.2644e-01, -7.1278e-02,
-1.8084e-01, 2.0858e-02, -9.3690e-03,
-7.6565e-03, -9.6854e-02, 7.6121e-03,
1.4791e-01, 4.5612e-01, 1.9889e-02,
-5.5498e-02, -1.1266e-01, 2.2790e-02,
-3.8821e-02, -1.5780e-02, 1.2549e-02,
-3.8232e-02, -2.8870e-01, 2.6216e-02,
1.0375e-01, -2.9621e-02, 1.8479e-03,
5.0207e-02, 1.5189e-01, 1.2533e-01,
1.8298e-01, -1.2870e-01, 3.0681e-01,
-1.9571e-02, -8.6302e-02, 9.1121e-02,
1.0113e-01, -1.8362e-01, 3.2642e-02,
1.7034e-01, -3.1077e-01, -4.8737e-02,
5.9144e-02, 5.6052e-03, 3.2360e-02,
-9.0123e-02, 7.7996e-02, 3.6297e-02,
-3.4389e-01, 1.1841e-01, -2.0900e-02,
9.4930e-02, -9.1504e-02, -4.5308e-02,
3.7723e-03, -3.7580e-02, -6.6410e-02,
5.2501e-02, -1.2530e-01, 3.5944e-02,
3.8378e-02, 9.5188e-02, 2.1952e-03,
-2.4333e-02, 2.7977e-01, 5.6961e-02,
-3.0605e-03, 8.3684e-02, 4.4848e-03,
-7.8935e-02, -1.9544e-01, -5.3311e-02,
-2.6595e-02, 1.2278e-01, -3.1659e-02,
-1.0103e-02, 4.7763e-01, 2.5359e-02,
8.1397e-02, 3.0548e-01, 9.7097e-02,
3.6232e-02, -1.1091e-01, 1.2841e-01,
1.9277e-01, 2.9322e-01, -1.6740e-01,
1.2107e-01, -6.2883e-02, 4.0603e-02,
-1.5750e-01, -8.6183e-02, -1.4194e-01,
1.1932e-01, -3.9175e-01, -5.4495e-02,
-1.4001e-02, -2.0594e-01, -8.2683e-02,
8.6156e-02, 2.1499e-02, 2.2080e-01,
5.5703e-02, -3.6307e-01, 8.3129e-02,
8.9280e-02, -3.5897e-02, 1.6106e-01,
9.1171e-02, -3.1102e-01, 1.2425e-01,
1.0278e-01, -3.1014e-01, -6.9138e-02,
8.0839e-02, -3.6183e-02, 1.0341e-01,
-1.8334e-01, -5.3700e-02, 2.3336e-01,
-1.4464e-01, -5.0320e-01, -2.9836e-02,
-1.7225e-01, -3.9499e-01, -1.7321e-01,
1.7510e-01, 1.7897e-01, -2.6518e-01,
2.3638e-01, 5.0270e-01, -4.9731e-03,
2.2603e-01, 2.5317e-01, 2.4079e-01,
-1.3159e-01, 1.5638e-01, 1.2480e-01,
-6.2164e-02, 7.9458e-02, -9.4804e-02,
8.5690e-03, 7.4971e-03, 8.6630e-02,
-1.3148e-02, 6.8660e-02, -7.4230e-03,
2.9702e-02, 1.2036e-01, 9.5504e-02,
-3.2694e-03, 8.6722e-02, -6.2433e-02,
3.2527e-01, 3.2087e-01, -9.4429e-05,
1.3556e-01, -7.0413e-02, 2.9383e-02,
2.0617e-02, 3.3218e-02, 4.4898e-02,
-4.8260e-01, -2.1329e-01, 1.5890e-02,
-2.6600e-01, -8.8519e-02, -4.3800e-02,
-1.7299e-01, -2.0757e-01, -2.6658e-01,
6.9707e-02, -4.4700e-02, 6.5570e-02,
2.3992e-01, 1.5078e-01, 2.8713e-02,
-9.1197e-02, 1.9765e-02, -1.8751e-02,
-9.9277e-02, -3.1437e-01, 4.0730e-02,
2.4208e-02, -8.8322e-02, -1.6245e-01,
1.3037e-02, -3.4708e-02, -4.4285e-02,
-1.3592e-01, -1.3575e-01, -7.4546e-02,
1.4670e-01, -1.3366e-01, 2.1553e-03,
8.1235e-03, -1.2068e-01, -5.7287e-02,
1.8015e-01, 2.1390e-01, 8.6923e-03,
2.8833e-01, 6.6345e-02, 1.4578e-01,
2.2338e-01, 2.6453e-01, -2.9112e-02,
1.4018e-01, -9.2824e-02, -2.2795e-02,
1.2360e-01, 2.2527e-01, -1.1817e-01,
-3.8872e-02, -1.9982e-02, -7.7514e-02,
1.7744e-03, 3.1736e-02, 4.5882e-02,
-2.5222e-02, 2.4298e-01, -3.8596e-02,
1.2545e-02, 3.1872e-02, 7.1925e-02,
7.9782e-02, -1.5533e-01, -1.4619e-02,
-1.2223e-01, -1.8631e-03, -9.8832e-02,
-1.6815e-02, -8.1440e-02, 6.8038e-02
}
,
{
2.3898e-02, 1.2411e-02, -3.2770e-02,
-2.6029e-01, 3.2690e-01, -1.8246e-01,
1.1224e-02, 8.0193e-02, -5.0412e-02,
-9.3849e-02, 2.0325e-02, 2.6309e-02,
1.2266e-02, 1.7698e-01, 2.7049e-01,
1.2918e-01, 2.0190e-01, 2.7352e-01,
-7.2100e-02, 1.3357e-01, -1.3702e-01,
2.2527e-01, 1.5821e-01, -2.3104e-01,
1.0182e-02, -1.5499e-01, 7.1906e-02,
1.5865e-01, 7.0950e-02, -6.3336e-02,
2.2661e-01, -4.2997e-01, -4.2013e-01,
1.7549e-02, -1.3142e-01, -3.1663e-01,
1.3617e-01, 1.4229e-01, -1.0707e-02,
-1.0986e-02, 2.8816e-01, -3.6239e-01,
2.2579e-02, -1.4332e-02, 7.1339e-03,
-1.4357e-01, -9.7608e-02, 1.4646e-01,
-5.3856e-02, 3.3898e-01, -2.4936e-01,
-2.9500e-02, 2.1799e-02, 1.1901e-02,
3.6996e-02, 2.1291e-02, 3.2150e-02,
9.8375e-02, 2.4476e-01, 2.2896e-01,
1.8392e-01, -7.4510e-02, -1.0152e-01,
4.4757e-02, -4.8053e-03, -6.7254e-02,
-4.8370e-02, -7.8975e-02, -3.6007e-01,
-3.8160e-02, 8.7707e-02, -1.4986e-01,
-8.7544e-03, -4.3522e-02, 7.3822e-02,
-1.4523e-01, 1.1433e-01, 4.4109e-02,
-1.6025e-03, 2.5459e-02, -9.3562e-02,
-2.9192e-02, -1.0975e-01, -5.0943e-02,
-1.1215e-01, 1.9907e-01, 7.9934e-02,
3.7066e-02, 3.0796e-01, -1.4034e-01,
-8.2315e-02, -2.0182e-02, -1.2824e-02,
-4.8007e-03, 1.2655e-01, -2.5157e-02,
2.7796e-02, -4.3032e-02, 2.5397e-02,
6.9377e-02, 2.3642e-01, 1.2713e-01,
2.7878e-02, -1.5325e-01, -1.4871e-01,
1.5800e-02, -4.5935e-02, 1.7370e-01,
4.8058e-02, -1.8725e-01, -6.7048e-03,
-1.3932e-01, -6.0768e-02, -1.6976e-01,
-2.1189e-02, 1.0311e-02, -2.2970e-02,
-7.0546e-03, 7.9481e-02, 1.2146e-02,
4.2666e-02, 3.5383e-01, 1.4381e-01,
5.4384e-02, -9.3862e-02, 4.8870e-03,
2.1141e-02, -6.6826e-02, -1.8526e-01,
1.3309e-01, 3.3452e-01, 1.1058e-02,
-1.6967e-02, 1.1094e-01, 5.3230e-02,
3.0409e-02, -4.7613e-02, -1.7737e-01,
-1.6678e-02, -7.8644e-02, 1.1743e-01,
7.3322e-02, -1.1354e-01, -1.5737e-02,
-1.2397e-03, -1.4685e-02, -1.0192e-02,
1.6045e-01, 3.6331e-02, 1.2219e-01,
1.3123e-01, 5.7578e-02, 1.0291e-01,
1.7424e-01, 1.0688e-01, 1.4263e-01,
8.9942e-02, -2.7141e-02, 3.1238e-02,
-4.0240e-02, -1.0930e-01, -2.1276e-01,
1.0357e-01, 5.7673e-02, 1.0356e-02,
-2.0864e-01, -1.9405e-01, 2.5094e-01,
-4.8277e-03, -1.3758e-01, 1.1562e-01,
-1.0358e-01, 2.0631e-01, -9.1445e-03,
-1.7602e-01, 1.0200e-01, 3.0032e-02,
-1.1495e-02, -4.5077e-02, -6.4748e-02,
-2.3072e-02, -3.2342e-02, 1.4503e-02,
-3.7052e-02, -1.2206e-01, 5.5395e-02,
2.8331e-02, -4.2812e-03, 6.9807e-02,
4.3593e-02, -6.7373e-03, 1.2760e-02,
3.2896e-03, -2.4007e-01, -5.2920e-02,
2.5193e-02, -2.1480e-01, 8.4654e-02,
2.2642e-02, 8.2132e-02, -2.3864e-02,
-2.9726e-01, 8.0405e-02, -1.3190e-02,
-1.1310e-01, -4.4342e-01, -6.3536e-02,
-6.7090e-02, 1.1797e-01, 1.5315e-01,
7.7829e-02, -1.4494e-01, 1.0233e-01,
9.7059e-02, 1.2772e-01, -2.4394e-02,
-2.6179e-02, 2.6721e-02, 1.1707e-02,
-4.8024e-02, -2.3366e-01, -1.6978e-01,
-2.4402e-01, -2.8572e-01, -2.4053e-02,
-2.7451e-03, 7.1959e-02, 4.4706e-02,
-1.9900e-01, 2.1353e-01, 1.0625e-01,
4.0246e-01, 4.2323e-01, 3.4046e-02,
-1.6943e-01, -2.0221e-01, -1.6369e-01,
1.3882e-01, 2.1717e-01, -1.3581e-01,
1.3975e-01, 1.1980e-01, 1.8888e-02,
-1.8110e-01, -2.6143e-01, -1.0109e-01,
5.5844e-02, -1.2175e-01, 3.4447e-02,
8.9688e-02, 2.4641e-01, 2.3287e-01,
-5.8259e-02, -1.3656e-01, -1.3936e-02,
-8.3429e-03, 2.3026e-01, 1.2302e-01,
-2.2969e-02, 6.0932e-02, 3.4749e-02,
1.2910e-01, 2.4008e-01, 1.8908e-01,
-5.8776e-02, 3.8121e-01, 8.1312e-02,
9.1175e-02, -1.8729e-02, -4.6156e-02,
3.7493e-02, -3.5877e-02, -9.9651e-03,
1.5864e-01, 1.3611e-01, 6.7880e-02,
2.2216e-01, 9.3697e-02, 7.4782e-02,
-1.0861e-01, -2.5824e-01, 6.6455e-02,
9.2238e-02, -2.3448e-01, -3.4057e-01,
-2.9658e-01, 9.4698e-03, 1.9315e-01,
-5.2396e-02, 1.2310e-01, -5.2917e-02,
-4.3708e-03, 1.9560e-01, -2.4309e-02,
-6.7388e-02, -8.8839e-02, -2.0907e-02,
4.6550e-02, 3.4119e-02, 6.0977e-02,
-1.0054e-02, 1.4411e-01, 1.5622e-01,
1.7401e-02, 2.5685e-01, -9.1853e-03,
-4.4530e-02, -1.8623e-01, -8.4557e-02,
9.5962e-02, 2.6491e-01, 1.7854e-01,
-2.0547e-02, -1.2023e-01, -7.6897e-02,
-1.3418e-01, -1.4960e-01, 1.6292e-01,
-1.7275e-01, -6.0181e-02, -2.7034e-02,
-7.4189e-02, -3.5566e-02, 1.3995e-01,
3.0758e-02, 3.3476e-02, 6.9837e-03,
-6.1089e-02, -9.6021e-02, 7.1716e-03,
1.0389e-01, 4.7963e-02, 9.5921e-02,
4.4569e-02, 1.2230e-01, -1.4417e-01,
-1.2825e-02, 3.1980e-01, -3.5905e-01,
-1.2557e-01, -7.5283e-02, -1.2343e-01,
1.9791e-01, 7.9003e-02, 3.1163e-02,
1.0969e-01, 1.6839e-01, -2.5816e-01,
-1.2617e-01, 1.3686e-01, -2.1078e-01,
-2.1870e-02, -1.8378e-01, -2.8893e-01,
-8.2523e-02, -3.0475e-02, 9.6007e-02,
1.0669e-01, -1.4581e-03, 3.2441e-01,
-8.1872e-03, 1.1690e-02, -4.0179e-02,
-1.0835e-01, 3.6112e-01, -4.5990e-02,
-1.2355e-01, -1.3372e-01, 3.8136e-02,
-9.1530e-03, 3.5432e-02, 4.3950e-02,
-8.6859e-02, 1.5887e-01, 1.2796e-02,
1.3554e-02, -1.5669e-01, -1.4371e-02,
-4.6609e-02, 1.7114e-01, -7.8284e-02,
1.7611e-01, 4.1204e-01, 9.3281e-02,
1.1420e-01, 1.2951e-01, -7.6025e-02,
-5.4831e-02, 9.7574e-02, 3.2839e-02,
3.8475e-02, -6.0247e-02, -2.9627e-02,
-2.4367e-02, 1.3143e-02, 4.7017e-02,
2.3800e-02, -2.4046e-02, -5.7044e-02,
2.7280e-02, 7.8573e-01, 1.0079e-02,
6.4100e-02, 5.1584e-02, 7.9653e-03,
-8.9480e-02, -1.6207e-01, -8.9418e-02,
-3.5589e-02, 3.5903e-01, -1.8381e-01,
9.2356e-02, 8.8046e-02, -5.0229e-02,
1.8609e-02, 1.1243e-01, 5.2599e-02,
-1.3374e-02, -3.3097e-01, 6.5346e-02,
2.6760e-01, -1.0281e-01, 1.1607e-02,
7.6576e-03, -3.5957e-02, 3.1924e-02,
-7.0088e-02, 9.1241e-02, 1.2827e-02,
3.7165e-02, 7.0273e-03, -7.3945e-04,
-6.5406e-03, 7.2666e-02, -5.7348e-02,
-1.9100e-01, -7.4449e-02, -1.2496e-01,
1.5299e-01, -8.8047e-02, -2.1810e-02,
-3.0241e-02, -7.4310e-03, -8.7682e-02,
-2.2479e-02, 9.6008e-02, -8.4539e-02,
-2.8915e-02, 1.7538e-01, -3.7735e-02,
-9.8463e-03, -6.9618e-02, -2.6095e-01,
9.9950e-02, 5.0534e-01, -1.8812e-01,
-1.1986e-01, 7.1166e-02, -2.4769e-02,
8.8529e-02, 9.8348e-02, 2.1136e-02,
-9.0337e-03, 1.3679e-01, -1.2115e-01,
-6.2478e-03, 1.1436e-01, -3.4610e-02,
-2.7350e-02, 1.0702e-01, 1.6220e-02,
1.0912e-02, 1.0953e-01, 8.6762e-02,
2.9348e-03, -2.2035e-02, 1.2376e-01,
7.0102e-02, -1.0945e-01, -1.6640e-01,
-3.9916e-03, -2.6658e-02, -9.7031e-02,
-3.0047e-02, 1.6631e-03, -5.5031e-02,
-7.9624e-02, 1.9976e-01, 1.9582e-01,
2.1377e-01, 3.5835e-01, 1.7012e-01,
-9.7751e-02, 4.9143e-01, 1.0988e-01,
8.4055e-02, -7.3187e-03, -9.8808e-02,
5.0590e-02, -8.9291e-02, -6.6857e-02,
9.6737e-02, -3.0699e-01, 2.2889e-01,
2.6727e-40, -5.2704e-40, -4.5038e-40,
-3.3108e-40, 5.2330e-40, -1.2724e-40,
-3.2957e-40, -5.8613e-40, 2.1618e-40,
-4.3882e-40, -3.3950e-40, 5.9372e-40,
2.7277e-40, -1.3741e-40, -3.3597e-40,
5.0687e-40, 4.7873e-40, -3.2116e-40,
-6.1388e-40, -6.0790e-40, -5.2667e-40,
-5.6524e-40, -6.1696e-40, -5.9796e-40,
1.5824e-40, -5.2002e-40, -5.8960e-40,
-5.9860e-40, 3.6419e-40, 2.9975e-40,
-5.8988e-40, 3.3994e-40, -5.0611e-40,
3.6410e-40, 2.9550e-40, 4.7468e-40,
2.7503e-40, -3.4103e-40, 6.0339e-40,
-1.7691e-40, 6.7170e-41, 1.7101e-40,
2.7166e-40, 4.3023e-40, 2.7735e-40,
-3.1937e-40, -4.9247e-40, -6.2495e-40,
5.2938e-40, -3.3702e-40, 1.4976e-41,
1.4031e-40, -4.6995e-40, -5.2409e-40,
2.5460e-40, 2.6670e-40, -4.5339e-40,
4.2896e-40, -5.7141e-40, -1.7003e-40,
2.3597e-40, 1.3748e-40, 4.6163e-40,
4.0680e-41, -6.1642e-40, 2.7304e-41,
5.2250e-40, -3.9481e-40, -6.1808e-40,
1.9462e-40, 2.6005e-40, -2.7281e-40
}
,
{
1.3625e-02, -8.5594e-02, -1.9901e-01,
-6.4636e-02, -1.9030e-02, 4.1963e-02,
-7.5507e-02, -2.4474e-01, -4.2621e-02,
2.8195e-02, 7.3102e-02, -9.3331e-02,
7.7093e-02, 1.7800e-01, -7.6451e-02,
2.8565e-02, -1.3540e-01, -1.9169e-01,
-1.8583e-02, 3.0135e-02, 8.1094e-03,
-1.2835e-01, -1.8041e-01, -8.9020e-02,
-8.2731e-02, 3.7861e-02, -9.4014e-02,
4.6595e-02, 2.2052e-02, -1.5867e-01,
-1.0937e-02, 1.0030e-01, -1.3018e-01,
-9.1844e-02, -1.7508e-01, 2.2087e-01,
-9.3080e-02, 9.8069e-02, -7.0154e-02,
-6.6063e-02, -2.2142e-01, 4.1058e-01,
-6.5947e-02, -5.4662e-02, 9.9412e-02,
-5.1938e-02, 3.0932e-03, 1.8126e-01,
3.6701e-02, -3.0349e-01, 9.9839e-02,
2.5810e-02, 2.3644e-01, -2.4461e-01,
2.1054e-01, 1.5630e-01, -1.9587e-01,
5.0146e-02, -1.8844e-02, 3.6675e-01,
-4.0389e-03, 3.1596e-01, 3.6771e-03,
-2.2256e-40, 1.4272e-40, -2.0732e-40,
5.5913e-40, -6.0538e-40, 1.2791e-40,
4.5825e-41, 4.1080e-41, -1.8211e-40,
2.2687e-01, -5.8992e-02, 4.7796e-03,
6.0603e-01, 2.7961e-01, 1.5973e-02,
2.3035e-01, 1.3031e-01, -9.9280e-03,
-4.7235e-02, 5.1773e-02, -4.8586e-02,
-1.4510e-01, -1.7336e-01, 1.0981e-01,
-2.0303e-01, -1.6008e-02, -1.8524e-03,
-2.3440e-01, -3.2373e-02, -6.7911e-02,
-1.6256e-01, 1.2316e-01, 2.7859e-02,
8.5089e-04, -3.7401e-02, -1.8672e-02,
-1.0418e-01, -7.8407e-02, -1.8413e-02,
8.2834e-02, 2.3128e-01, 3.2983e-02,
3.1099e-02, -6.4485e-02, -8.1659e-02,
1.9152e-01, -1.9609e-02, 2.7364e-02,
1.0458e-02, -1.2507e-01, 4.1334e-02,
-4.6215e-02, 5.6944e-02, 2.1477e-02,
-1.4934e-01, -6.8383e-02, 2.7957e-02,
-3.6846e-01, 4.8766e-01, 6.4000e-02,
-3.9621e-02, -8.1667e-03, 4.5997e-02,
-6.1391e-02, 1.2976e-02, -3.2152e-02,
7.5767e-02, 1.2931e-01, -2.3498e-02,
4.0320e-02, 1.3876e-02, 1.1022e-02,
-6.2401e-41, 5.8564e-40, 3.9473e-40,
-5.6890e-40, -2.6022e-40, -2.9841e-40,
-4.2456e-40, -1.1546e-40, 4.4955e-40,
-4.2969e-02, -1.0995e-01, 1.3021e-01,
1.0142e-01, 5.2225e-01, -5.5486e-02,
-7.2349e-02, 8.5470e-02, 2.3438e-02,
-1.0690e-01, -1.4370e-01, -1.2632e-01,
2.8754e-02, 1.1662e-01, 5.6515e-02,
-1.5726e-01, -1.4945e-01, -4.4956e-02,
1.6574e-01, -5.6894e-02, -2.0851e-01,
8.1498e-03, -2.5441e-01, -1.4412e-01,
-1.0959e-02, -2.5811e-02, 8.8934e-02,
6.3594e-02, -9.3314e-02, 7.8247e-02,
4.6795e-02, -2.2774e-01, 7.1041e-02,
1.4830e-01, 1.9911e-01, 5.1978e-02,
7.4936e-02, 2.3104e-02, 6.3928e-02,
-1.3118e-02, 6.7544e-02, 7.9514e-02,
2.2335e-02, -9.9442e-02, 6.8070e-03,
2.4395e-02, -3.3576e-02, 5.5508e-02,
-4.0872e-02, 5.4501e-02, -5.7051e-02,
8.6621e-03, -1.5361e-01, 1.2630e-01,
-2.2344e-01, 1.3335e-01, -1.1688e-01,
-2.4232e-01, 3.3319e-01, -1.2580e-01,
-2.2169e-02, 2.0594e-01, 2.6521e-02,
4.1883e-40, -3.4540e-40, 4.9152e-40,
-1.5711e-40, 3.3927e-40, -5.5069e-40,
5.5831e-40, -5.2011e-41, 1.0351e-40,
1.7989e-01, 2.3787e-02, 5.7447e-03,
4.8748e-01, 3.0152e-01, 3.5517e-02,
2.2155e-01, 1.8812e-01, 3.0994e-02,
7.8657e-02, -7.1135e-02, -5.8293e-02,
-1.4220e-01, 1.6004e-02, -2.5180e-02,
-1.6811e-01, -2.3441e-01, 1.4810e-02,
5.3140e-02, -1.2904e-01, -1.5105e-02,
5.4525e-02, -1.5418e-01, 6.6507e-02,
8.3947e-02, -1.1975e-01, 5.3902e-02,
8.0834e-02, -2.4321e-01, -1.0282e-03,
3.1276e-03, 3.2495e-01, -1.3238e-02,
4.5285e-02, 5.8777e-02, -1.3231e-01,
-6.0928e-03, 8.7145e-02, 6.2031e-02,
-5.3919e-01, -6.8810e-02, -1.0755e-01,
-2.2571e-02, 2.6237e-02, -6.8731e-03,
-6.6771e-02, -2.0586e-01, 4.7722e-02,
-3.4968e-01, 3.0912e-01, 2.4487e-01,
-4.9537e-02, -5.2779e-04, 6.7840e-02,
1.7583e-02, 3.3222e-02, -5.7070e-02,
-2.3250e-01, 1.4470e-01, -4.9895e-02,
3.3147e-02, 8.6319e-02, 4.4719e-02,
-6.9454e-41, 2.0308e-40, -1.1977e-40,
5.9045e-40, -2.6129e-40, 4.8298e-40,
4.7288e-40, 6.0736e-40, 2.2462e-40,
-4.0294e-02, -9.1437e-03, -2.4926e-02,
-2.1269e-01, 1.1602e-01, 1.4383e-02,
5.1456e-02, 6.9047e-02, 1.6519e-02,
6.3737e-02, -9.0181e-02, 7.0716e-02,
7.0061e-02, 7.9046e-02, -4.3925e-02,
7.4396e-02, -5.2797e-02, 3.8125e-02,
7.5999e-02, -5.1307e-02, 2.4326e-03,
-3.1716e-02, -1.2567e-01, -3.3898e-02,
8.4925e-02, -5.2404e-02, 2.8535e-02,
9.6844e-03, 4.6980e-02, 3.8552e-02,
-5.7110e-02, 3.2163e-02, 1.5219e-02,
6.6905e-02, -2.7934e-02, 1.4184e-03,
-2.4239e-02, -8.6317e-03, -2.3295e-03,
-2.3065e-02, 1.0076e-01, 2.1562e-03,
-1.3647e-02, -3.4262e-02, 2.5777e-02,
7.6601e-02, 1.3654e-01, 2.1458e-03,
1.4542e-01, 3.6310e-01, 1.6266e-01,
-5.8465e-02, 4.3751e-02, 1.9227e-02,
9.1783e-03, -5.9547e-02, -1.8234e-02,
-5.3399e-02, 1.9218e-01, -4.6238e-02,
-1.9052e-01, 1.4635e-02, 2.9536e-02,
1.4621e-40, -5.5132e-40, -4.6215e-40,
4.3948e-40, -2.7285e-40, -5.5709e-40,
1.9428e-41, -4.0333e-40, -5.4469e-40,
9.3126e-02, -1.3236e-01, 9.9350e-02,
-1.3308e-01, 3.5030e-01, 9.2221e-02,
1.1783e-01, 1.6648e-01, -7.9150e-02,
2.2654e-01, -1.2546e-01, -1.2354e-01,
-1.6457e-01, -6.0740e-02, -3.1069e-02,
-8.3203e-02, -1.8064e-01, 4.6900e-02,
1.2059e-01, -1.0569e-01, -7.1196e-02,
-9.2991e-02, -1.7587e-01, 1.3100e-03,
-1.5492e-01, -1.3849e-01, 1.2245e-01,
-5.5276e-02, -9.7867e-02, 3.5550e-02,
-6.0264e-02, 4.7760e-02, 6.0242e-02,
-5.4096e-03, 2.4646e-01, 6.3592e-01,
5.8559e-02, 6.1117e-02, 8.0334e-02,
-4.4582e-03, -1.2028e-01, 8.7394e-02,
-2.5880e-02, -1.2206e-01, 1.2199e-01,
4.1990e-02, -1.3283e-01, 4.9047e-02,
-4.9532e-02, 2.7688e-01, -4.6064e-03,
-2.8812e-03, -2.4404e-01, 5.8614e-02,
-1.4262e-01, -1.2810e-03, -1.2060e-01,
-8.3595e-02, 5.6532e-02, -7.7556e-02,
-1.3364e-01, -1.3883e-01, -1.2335e-01,
-1.3273e-40, 6.5184e-41, -4.6946e-40,
-4.0031e-40, -1.2807e-40, -3.1584e-40,
1.3009e-40, 2.4187e-40, -1.4202e-40,
-8.8844e-03, 1.0101e-03, -6.0190e-02,
-1.8851e-01, -7.6662e-02, -1.4562e-01,
2.9983e-02, -8.1533e-02, 1.1256e-02,
1.0205e-01, 6.7850e-02, -1.0911e-01,
-1.2846e-01, -5.4605e-02, 6.2182e-02,
-1.0797e-01, -5.1281e-02, -1.2036e-02,
-8.1693e-02, -7.0432e-02, 1.6990e-01,
-1.7329e-01, -2.2084e-01, -3.0977e-02,
8.2771e-02, -3.3089e-01, -1.4842e-01,
1.9576e-02, -1.5953e-01, -1.0348e-01,
6.6014e-02, 6.0094e-01, -6.9891e-04,
7.4969e-02, -1.4250e-01, 4.3221e-02,
1.6796e-02, -6.8125e-03, 4.7028e-02,
-3.3421e-01, -2.2987e-01, 4.2936e-02,
9.3985e-04, 9.0827e-02, 2.4211e-01,
-8.1571e-02, -1.0276e-01, 1.9092e-01,
2.1112e-01, 2.6837e-02, -2.5822e-01,
-1.3290e-01, 1.6135e-01, -2.7672e-02,
3.4465e-01, -8.3286e-03, -6.1936e-02,
2.7406e-01, -6.8357e-02, 1.7426e-01,
-9.0872e-02, 1.2999e-01, 7.2366e-02,
3.0944e-40, -1.2808e-40, 2.9336e-40,
5.5561e-42, 3.0978e-40, 1.0027e-40,
-1.5881e-40, -2.9858e-40, 3.1599e-41,
-9.1935e-02, -2.2666e-04, -6.2821e-02,
-1.8605e-01, 3.0238e-01, 3.2759e-02,
-5.0771e-02, 1.4585e-02, -1.0872e-01,
2.5511e-02, -9.3394e-02, 1.4810e-02,
-6.2906e-02, 9.2472e-02, 1.2845e-02,
-2.9041e-01, -9.6489e-03, -2.7277e-02,
-6.9896e-02, -1.1645e-01, -5.9870e-02,
-2.8037e-02, -2.2649e-01, 5.1781e-02,
-1.4588e-02, 4.8753e-02, -2.8256e-02,
-1.6462e-02, 8.0795e-02, 3.6222e-02,
8.0392e-02, 3.0118e-01, 2.0021e-01,
1.0394e-01, 6.4196e-01, 4.9545e-01,
2.1242e-02, -1.2514e-01, 1.0066e-01,
-4.7676e-02, -2.0736e-02, -5.6951e-03,
-8.3021e-02, 4.6763e-02, 1.7551e-01,
2.0038e-02, 1.8084e-01, 1.3244e-02,
1.0280e-02, 2.8740e-01, 8.9837e-03,
-2.9437e-02, -3.7366e-01, -1.1861e-01,
-4.8248e-03, -1.2970e-01, -1.8680e-02,
1.8458e-01, 5.6509e-02, 1.2734e-01,
1.9423e-01, -3.6960e-01, -2.5555e-02,
6.7959e-41, -3.2251e-40, -3.0631e-40,
-4.0701e-40, 9.7399e-41, 2.2917e-40,
2.0169e-40, 5.7891e-40, -4.1286e-40
}
,
{
5.6253e-02, 1.0118e-02, -8.2749e-02,
-6.4074e-02, 4.0723e-02, 1.1657e-02,
-1.1560e-01, -3.5596e-03, -2.6713e-02,
-7.9090e-02, -2.9223e-01, 1.5759e-01,
6.8756e-02, 1.5738e-01, 1.5413e-01,
-6.1288e-02, -1.2536e-01, -1.5966e-01,
1.1165e-01, 5.0211e-02, -1.0338e-01,
-5.2364e-04, 1.7660e-01, -2.2504e-03,
-1.7697e-01, 1.8500e-02, 2.0693e-02,
-2.5907e-02, -1.4201e-01, 8.4467e-02,
1.1138e-02, 2.1769e-01, -4.2422e-01,
6.5046e-02, 2.6834e-02, 2.9047e-03,
-1.2130e-01, -5.1773e-01, -8.0393e-02,
3.0204e-02, 3.5952e-01, 1.6681e-01,
-9.4720e-04, 7.7291e-02, 8.3039e-02,
3.4689e-01, -1.2389e-01, -2.0666e-01,
-2.9650e-02, 1.1102e-01, -1.4782e-01,
3.2193e-02, -3.9862e-02, 1.6440e-02,
-8.4264e-02, 1.0192e-01, -6.4256e-02,
2.2950e-02, -6.6511e-02, -6.3814e-02,
4.3744e-02, -1.0557e-01, -1.2045e-02,
1.6330e-01, 6.6130e-01, 1.5497e-01,
1.7103e-01, 1.5073e-01, 1.7400e-01,
9.0985e-04, 1.0917e-02, -1.3322e-02,
-6.4273e-02, -6.2178e-02, -7.7223e-02,
-1.0332e-01, -2.1072e-01, -2.2843e-03,
3.2717e-02, -6.3754e-02, 5.0359e-02,
-5.2566e-02, 6.2090e-02, -1.5614e-02,
1.4570e-02, -1.0243e-01, 1.3091e-01,
-2.9988e-02, -7.5897e-02, -9.4541e-04,
-2.7999e-01, -4.7415e-03, 5.6419e-02,
7.0565e-02, -4.9273e-01, -1.2936e-01,
5.5685e-02, -5.8924e-03, -3.1967e-02,
8.8602e-02, 2.9337e-01, 1.3753e-01,
1.0063e-02, 1.6348e-02, 1.0063e-01,
3.6230e-02, 1.7968e-02, -1.1624e-01,
-2.2488e-02, 1.3474e-01, -1.1419e-01,
2.8576e-02, -7.4794e-02, -7.7261e-02,
5.8874e-02, -2.9448e-03, 6.0207e-02,
1.4642e-01, 1.2321e-01, -2.4936e-01,
2.2609e-02, -2.8171e-01, 1.1510e-01,
2.6056e-02, -2.7532e-02, -4.7505e-02,
-2.8762e-02, -1.2610e-02, -8.3766e-02,
-5.0992e-02, -5.7269e-03, -7.0981e-02,
-9.6191e-02, -9.2384e-02, -5.3328e-02,
2.3989e-01, 3.9819e-01, 1.8451e-01,
3.6888e-02, 1.1023e-01, 4.4804e-03,
-4.4140e-03, -4.8275e-03, 2.0018e-02,
-2.4346e-02, -6.5546e-02, -4.6065e-03,
2.2298e-01, 2.8810e-01, 1.4071e-02,
-1.7315e-01, -5.7961e-02, -9.9136e-02,
3.6456e-02, -1.5518e-02, 6.4490e-02,
4.6983e-02, 5.2743e-02, 3.0802e-01,
6.7940e-02, 5.8777e-03, 3.1155e-01,
9.9510e-02, 2.7974e-02, -6.6716e-02,
3.7042e-01, 2.0813e-01, -3.1581e-02,
7.9064e-02, -1.3699e-01, -4.4722e-02,
-8.4753e-03, 8.0676e-02, 1.5771e-01,
-1.1467e-01, 5.6269e-02, 1.1369e-01,
-1.4727e-02, 3.7263e-02, -2.0554e-01,
8.3383e-02, 4.5848e-02, -1.1732e-02,
4.5494e-02, -2.1406e-01, 6.0591e-02,
4.6503e-02, -1.0362e-01, 3.8794e-02,
-4.6633e-01, 1.4504e-01, 1.4999e-01,
2.9642e-01, -4.8807e-01, -1.6012e-01,
1.6708e-01, 9.5313e-02, -7.5981e-02,
-4.2655e-02, 9.2470e-02, -7.7242e-02,
-2.1021e-01, 1.2423e-01, 1.4967e-02,
-5.4129e-02, 7.4355e-02, -4.7068e-02,
-1.6048e-01, 9.8742e-02, 4.4282e-02,
-6.0187e-02, 1.9495e-01, 8.3291e-02,
-7.5190e-02, -6.8429e-02, 3.7391e-02,
5.1413e-04, 1.5098e-01, -1.1549e-01,
1.6875e-01, 1.8040e-01, -1.3162e-01,
7.7101e-02, 2.0816e-01, 7.6289e-02,
-1.7528e-02, 1.4408e-02, 3.7500e-02,
3.8647e-02, 1.6850e-01, 1.7535e-02,
-2.8205e-02, 1.0273e-02, 1.6688e-01,
4.3676e-02, 6.9895e-02, 8.1063e-03,
-2.6117e-01, -1.0920e-01, 5.2209e-02,
-5.2749e-02, -1.7062e-02, -9.6808e-02,
2.7324e-02, 9.1342e-02, -5.0968e-02,
1.0689e-01, 5.0565e-01, 4.6004e-01,
-6.6862e-03, 3.4162e-03, 3.3559e-01,
3.5084e-02, 1.9123e-02, 1.0073e-02,
1.6995e-01, 3.4099e-01, -4.0847e-01,
-5.5317e-03, 4.0230e-02, -2.0305e-01,
-8.9786e-02, 1.9667e-01, 3.8111e-02,
3.0607e-02, -1.9084e-02, -6.5114e-02,
8.5394e-02, -1.3992e-01, 1.4988e-02,
-1.5926e-02, -9.1200e-03, -7.2328e-02,
1.3548e-01, 7.1040e-01, -9.4208e-02,
2.5411e-03, -7.2159e-02, 1.0848e-01,
-8.9029e-02, -8.6339e-02, -2.7546e-02,
6.0378e-02, 2.8401e-01, -6.6550e-02,
-3.0486e-02, 5.0307e-02, -1.1084e-02,
2.9732e-02, 9.9960e-02, -7.7408e-02,
3.4940e-01, -5.6048e-01, 2.9053e-02,
-2.6991e-02, 4.9637e-02, -3.9322e-02,
-1.0418e-02, 1.0931e-01, -6.1609e-02,
3.6057e-02, 9.3866e-02, -1.0339e-01,
-1.8572e-02, -2.0889e-02, -7.4531e-02,
-7.3236e-02, -4.5908e-02, 2.2705e-02,
-1.5148e-02, 2.1735e-01, 2.2477e-02,
-3.4153e-02, -2.6939e-02, -5.0167e-03,
6.6774e-02, 2.0168e-01, -7.5083e-02,
5.6608e-02, 2.2799e-01, -3.7473e-01,
-7.2336e-02, 4.4329e-02, -3.6747e-02,
3.5355e-02, 1.8671e-01, -4.0167e-02,
1.2871e-01, 3.5050e-01, 1.8090e-01,
-6.2429e-02, 6.2184e-02, 6.8804e-02,
-8.0164e-02, -2.4387e-02, -5.0309e-03,
1.0089e-01, -3.0008e-02, 1.7251e-02,
-9.4662e-03, -1.4760e-02, 7.3434e-03,
7.3290e-02, 2.2546e-02, -2.9015e-02,
7.9944e-02, -2.6972e-01, 7.1349e-02,
-1.7026e-02, 1.1461e-01, -4.1288e-02,
-5.3732e-02, -2.4618e-01, -1.2890e-02,
8.6133e-02, 1.9503e-01, 8.2202e-02,
-1.0060e-03, -4.5931e-04, -1.8789e-02,
-4.0843e-02, -7.8149e-03, -6.1464e-02,
-7.9364e-02, -5.9647e-02, -5.4059e-03,
1.9553e-01, -2.4079e-01, -7.9538e-03,
5.3620e-02, 1.4198e-01, 6.5651e-03,
2.3512e-02, -2.6609e-02, -4.6435e-02,
1.2499e-02, 5.1079e-02, -2.2713e-02,
-7.1554e-02, 1.0608e-01, 5.8972e-02,
1.8638e-01, -2.1053e-01, -6.4009e-02,
1.0851e-01, 7.2187e-02, 8.9722e-02,
-4.5365e-04, 1.0826e-01, -6.4141e-02,
-2.3874e-02, -4.6307e-02, -2.7813e-02,
1.8385e-02, 9.4687e-02, 6.8374e-02,
9.4526e-02, 1.4432e-02, 1.5937e-01,
1.1292e-01, -3.4274e-01, -1.0813e-01,
-7.4636e-03, 3.7101e-02, 3.7226e-02,
3.7079e-02, -3.9169e-02, -3.7752e-02,
-7.9021e-02, 8.5978e-02, 1.0958e-02,
-5.8576e-02, 5.5931e-02, 4.8301e-02,
-1.3402e-01, -3.3809e-01, -4.4369e-02,
1.4262e-01, 6.5254e-02, -3.3366e-01,
1.2416e-02, -9.0492e-02, -5.8205e-02,
-1.4886e-01, 4.0598e-02, -1.4219e-01,
2.0223e-03, -2.8673e-01, -3.3622e-01,
1.9191e-02, -2.2104e-02, 1.9048e-02,
6.0021e-02, 2.2520e-01, -5.3972e-02,
1.6226e-01, -2.1918e-01, -5.2117e-02,
-6.2363e-03, 2.0266e-01, -7.3323e-03,
1.1137e-01, -1.9300e-02, -5.4983e-02,
-1.8338e-01, 6.2511e-01, -1.7909e-01,
1.7003e-01, 1.7902e-01, 5.4462e-02,
5.6847e-02, -7.4696e-02, -1.1354e-02,
1.0544e-01, -1.4918e-01, 4.8208e-02,
-5.6262e-02, -2.3303e-01, -2.9916e-02,
-3.3261e-02, 1.3287e-01, 1.9831e-02,
-1.3907e-01, -1.6180e-01, -7.2323e-03,
-5.1689e-02, 6.3121e-02, -1.4480e-01,
1.1143e-01, 4.9625e-02, -5.4369e-02,
-3.9247e-01, 2.3412e-01, -3.6726e-02,
-1.1468e-02, 3.4045e-02, 6.6454e-02,
-5.0103e-02, 6.1740e-02, 4.2922e-03,
1.7669e-01, -8.1250e-03, 6.3694e-03,
-6.7723e-02, 7.4576e-02, 1.0113e-02,
1.1264e-01, -4.4691e-02, -5.3575e-02,
3.4691e-02, -1.2201e-02, -8.4221e-02,
2.3677e-01, 3.9073e-01, 2.4710e-02,
-8.4580e-02, -1.0747e-01, -6.5695e-02,
1.5386e-01, 1.4041e-01, 6.9961e-03,
2.6138e-02, 2.3149e-02, -1.8820e-02,
-3.3541e-02, 3.2089e-02, -1.8916e-02,
1.0564e-01, -7.5319e-02, -5.4282e-02,
-6.9388e-03, -2.0873e-02, 5.6100e-02,
2.3524e-02, -6.4296e-02, 5.8950e-02,
-3.1415e-03, -4.1203e-02, 1.0781e-01,
1.7848e-02, -2.9535e-02, -1.6412e-02,
-4.6649e-02, 8.1277e-02, -5.9918e-02,
8.1522e-02, -9.2037e-02, 8.1039e-03,
-6.5541e-02, 5.1811e-02, -1.4380e-03,
5.0419e-02, 9.3091e-03, -2.8054e-02,
-3.0979e-02, -2.5366e-02, 3.5265e-02,
-3.7730e-02, 5.7574e-02, 3.4683e-02,
4.8819e-03, -2.9519e-02, 3.7740e-02,
6.4546e-02, -3.7272e-01, -8.5393e-02,
-3.0223e-02, -7.7899e-02, 2.7365e-03,
2.2282e-02, -3.3440e-02, 1.9048e-02,
2.3275e-02, -2.1153e-02, -2.0385e-02,
-4.6245e-02, 2.2443e-02, -3.0206e-02,
-2.5302e-02, -1.1418e-02, 4.8228e-02,
5.8367e-02, -4.3062e-02, 2.2814e-02,
-4.6279e-02, 5.0052e-02, 2.2961e-02,
-5.4984e-02, 1.4773e-01, -2.5546e-02,
3.3025e-02, -1.0138e-01, 6.3886e-02,
1.2403e-02, 1.6215e-02, 1.0783e-02
}
,
{
2.5042e-02, -5.3266e-02, 3.8484e-02,
3.7189e-03, 1.0493e-01, 1.4459e-01,
-3.7442e-02, -1.5744e-01, 1.9957e-01,
-1.9203e-02, 1.6256e-02, 4.2906e-03,
-3.1637e-02, 5.0287e-01, -6.9504e-02,
1.4677e-03, -8.9984e-02, -9.0376e-02,
4.0578e-02, 2.4004e-02, 3.4044e-03,
7.5916e-02, -1.3564e-01, -9.0296e-02,
3.4156e-02, 7.2494e-02, -2.0037e-02,
-6.4614e-02, -1.7301e-03, -3.3444e-02,
-2.7950e-01, 7.1351e-01, 4.2825e-02,
2.4797e-02, 5.4162e-04, -8.9676e-02,
3.8002e-02, -2.7692e-02, -1.7757e-02,
1.9356e-01, 1.9598e-02, -1.0862e-01,
2.5734e-02, 1.1703e-02, -7.3912e-02,
-6.0213e-04, 1.6024e-01, -6.4591e-03,
3.1779e-02, -3.1049e-01, 1.2684e-02,
-1.0098e-01, -1.8839e-01, 5.1387e-02,
5.2004e-02, 3.1489e-01, 5.9716e-01,
-7.2238e-02, 3.4332e-01, -2.0655e-01,
1.1013e-03, -5.0328e-02, -4.6118e-02,
9.4442e-04, 2.7964e-02, 1.7672e-02,
-8.6022e-02, -3.8280e-02, 2.8017e-04,
3.3824e-02, -6.7883e-02, 1.0529e-02,
-6.5982e-02, 1.1385e-01, 3.0091e-03,
1.2330e-01, 6.1876e-01, 5.7145e-02,
-4.3835e-02, -6.8186e-01, -1.0917e-01,
3.2006e-02, -2.0627e-03, -6.9043e-02,
7.2219e-02, -3.2393e-01, -2.6657e-02,
1.3523e-02, 1.8099e-01, 4.9168e-02,
7.1367e-02, 9.8283e-02, 1.0425e-01,
2.2286e-01, -5.9374e-01, 1.0014e-01,
6.5700e-02, 1.3618e-02, -7.4045e-02,
1.0481e-01, 3.0734e-02, 1.0431e-02,
-2.1314e-01, -7.2817e-02, 1.2036e-01,
-5.4180e-02, 1.0500e-01, 2.7821e-02,
-5.0657e-02, 8.7702e-02, 7.0234e-02,
9.0349e-02, 1.4905e-01, 1.1612e-01,
5.9924e-02, 2.4928e-01, 1.7078e-01,
-5.9110e-02, -7.4252e-02, 9.8241e-03,
-1.2006e-01, 1.3879e-01, -1.4322e-02,
-7.5463e-02, 1.4407e-02, -6.9202e-03,
7.0279e-02, 1.7065e-01, -2.5150e-01,
-2.6289e-02, 3.8421e-01, -2.2051e-01,
-2.8918e-02, 4.0074e-02, -7.1296e-02,
1.0357e-01, -1.8885e-01, 2.3780e-02,
-1.8884e-01, -4.3326e-01, -1.1465e-01,
3.3497e-02, -1.3462e-01, -3.4127e-02,
-1.2731e-02, 5.4326e-02, -2.6581e-02,
5.1753e-02, 6.8200e-03, 4.3246e-03,
-6.9963e-02, -1.5618e-01, 2.5192e-01,
2.2890e-02, 6.1421e-02, 5.2832e-02,
-9.8369e-02, -1.1452e-01, 1.7420e-01,
2.0392e-01, -1.1322e-01, 9.8462e-02,
-3.3547e-02, -2.8993e-01, 7.0080e-02,
8.2478e-02, -1.9881e-01, 1.2849e-01,
-2.7802e-01, -1.5621e-01, 6.2712e-02,
1.3028e-02, 1.4716e-01, 2.0434e-02,
-4.4071e-01, 3.8359e-01, -1.6655e-03,
-2.0297e-01, 1.5631e-01, 7.7086e-02,
9.6714e-03, -5.5842e-03, 7.9155e-03,
1.4525e-01, -3.2228e-01, 1.1454e-01,
1.4527e-01, -3.0399e-02, -6.7043e-02,
9.4233e-03, -1.1296e-02, -1.0927e-01,
7.9300e-02, 5.5286e-02, -1.1558e-01,
3.8173e-01, -5.4351e-02, -1.7890e-01,
5.4882e-02, 1.5119e-01, 1.8363e-01,
-8.8223e-02, -9.0083e-02, 4.8221e-01,
4.0890e-02, 5.6429e-02, -2.8538e-01,
1.2102e-02, -1.8177e-02, -3.1643e-03,
-6.9064e-02, 3.1853e-04, -7.0113e-02,
9.7308e-02, 1.0691e-01, -6.5919e-02,
-1.4536e-40, -1.7049e-40, -2.6781e-40,
4.5792e-40, 1.4489e-40, 1.3645e-40,
-5.8774e-40, -2.2505e-40, -4.7571e-40,
3.3670e-40, 1.5398e-40, -3.3819e-40,
2.6303e-40, -1.9434e-40, -5.5555e-40,
-4.3830e-40, -2.8750e-40, -3.0788e-41,
5.6364e-40, 3.1307e-40, -2.3064e-41,
2.8909e-40, -5.8115e-40, 2.9852e-41,
-1.9273e-40, -7.5503e-41, -6.0335e-40,
5.8073e-40, 2.9252e-40, -1.3038e-40,
5.2260e-40, 3.8172e-40, -2.0389e-40,
-2.1905e-41, 1.8473e-40, -2.9226e-40,
2.9957e-41, 2.6068e-40, 6.1324e-40,
-4.3013e-41, 5.1421e-40, -4.1157e-40,
2.1416e-41, -1.6614e-40, -3.0843e-42,
-4.3402e-40, 2.8507e-40, 1.1560e-40,
3.8826e-40, -3.0797e-40, -6.0685e-40,
5.4170e-40, -6.1858e-40, 9.3049e-41,
-1.9491e-40, -1.9211e-40, -6.2723e-40,
3.9906e-40, 1.2356e-40, 3.8682e-40,
2.8630e-40, 6.2303e-40, 5.3034e-40,
-4.1904e-40, 4.8916e-40, -3.6125e-40,
-5.5393e-40, -2.4980e-40, -6.1877e-40,
2.7289e-40, -1.8348e-40, -5.6663e-40,
2.5152e-02, -3.2878e-02, 2.1626e-02,
1.9879e-01, 2.9080e-02, -3.0331e-03,
-2.3380e-01, -2.3578e-02, 1.1871e-01,
-3.1824e-02, -5.5095e-02, 3.1338e-02,
-3.2199e-02, -4.3820e-01, 4.1391e-02,
-4.1207e-02, 3.7475e-01, -1.8548e-01,
-1.4460e-02, -8.7834e-02, -3.2343e-02,
2.4023e-01, 7.1916e-01, -1.8559e-01,
-6.7635e-03, -9.4409e-02, -1.7890e-02,
-5.8334e-02, 1.8886e-01, 6.1547e-02,
-2.6152e-01, 6.6722e-01, -1.2486e-01,
-4.8128e-02, 1.0510e-01, -4.2619e-02,
3.0101e-03, 9.6380e-02, 6.6140e-02,
1.0201e-01, -2.3240e-01, -1.8356e-01,
4.0019e-02, 2.2985e-01, -1.2980e-01,
-1.1400e-01, -1.9221e-01, -3.4158e-02,
2.2871e-02, -6.8684e-01, -1.0856e-02,
2.6311e-02, 2.5422e-02, -1.5190e-02,
3.2182e-02, -5.6346e-02, 3.2655e-02,
-1.6912e-02, 8.4264e-02, -7.9521e-02,
1.2788e-03, -7.1110e-02, 8.6585e-02,
-4.2829e-02, 1.0778e-01, -6.8129e-02,
5.8156e-03, -2.3998e-01, 1.9052e-01,
-4.1855e-02, 1.0140e-01, -1.7139e-02,
5.2301e-40, -2.9923e-40, 3.8688e-41,
3.1575e-40, 1.1504e-40, 5.5655e-40,
-3.4499e-40, 2.3050e-40, -6.3766e-41,
1.3282e-40, 4.5849e-40, 3.5308e-40,
-2.6657e-41, 5.9829e-40, 3.2791e-40,
-2.8348e-40, 2.5810e-40, 5.5791e-40,
4.2613e-40, 3.2607e-40, -2.0789e-40,
-3.9054e-40, -2.5608e-40, -2.7638e-40,
4.5027e-40, 2.7065e-40, -4.5593e-40,
1.6336e-40, -2.0391e-40, -5.9017e-41,
-7.9899e-41, -2.9870e-40, 5.6390e-40,
-2.5560e-41, -1.9786e-40, 9.4700e-41,
-7.4049e-41, -2.3902e-40, -2.8497e-40,
-1.8912e-40, -1.5589e-40, 5.5463e-40,
-2.1782e-40, -1.9532e-40, -2.3785e-40,
2.7539e-40, 4.0214e-40, 2.0732e-40,
7.0120e-41, -4.4200e-40, 7.3787e-41,
2.6452e-40, 1.1970e-40, 2.8298e-40,
5.2721e-40, 1.9304e-40, -3.8489e-40,
-3.9759e-40, 2.6184e-40, 1.2594e-40,
1.5831e-40, 3.7179e-40, -3.4915e-40,
-1.7681e-40, -6.9657e-41, -4.0746e-40,
8.0894e-41, 1.6950e-40, -1.0574e-40,
-1.0590e-40, 2.8466e-41, -2.7558e-40,
-5.4027e-40, 4.4355e-41, -3.2144e-40,
-4.8838e-41, -3.8595e-40, 2.5064e-40,
4.0365e-40, -1.0195e-40, 4.8356e-40,
4.4499e-40, -4.4871e-40, -2.4561e-40,
4.1687e-40, 5.2239e-40, -5.7603e-41,
-1.5211e-40, -3.5768e-40, 3.6385e-40,
1.6089e-40, 4.1624e-40, 4.5114e-40,
1.6438e-40, -3.6331e-40, 6.4961e-41,
5.0899e-40, 6.1036e-40, 2.4828e-40,
5.8681e-40, -5.7259e-40, -1.5371e-40,
5.2654e-40, 4.7412e-40, -2.0265e-40,
-4.8621e-41, 4.9497e-40, 3.0176e-40,
4.2235e-40, 4.5381e-40, 4.6501e-40,
-1.6124e-40, -1.9449e-40, 5.1497e-40,
-1.2891e-40, -1.6549e-40, 4.8348e-40,
-2.0735e-40, 1.3423e-41, -4.4109e-40,
-5.4218e-40, -1.1537e-40, -1.1664e-40,
5.6006e-40, 3.4109e-40, -3.1434e-40,
3.4969e-40, -5.3459e-40, 3.9245e-41,
2.4028e-40, 5.7774e-40, -6.2973e-40,
1.8802e-40, -4.6258e-41, -5.0716e-40,
3.4962e-40, -6.2313e-41, -2.7290e-40,
-5.2709e-40, -3.2225e-40, 2.4245e-40,
-3.6300e-40, -2.0794e-40, 4.0541e-40,
-3.5157e-02, 6.8337e-02, 1.6149e-02,
-5.8650e-03, 6.0605e-01, 3.1738e-02,
9.3306e-02, 2.1499e-01, 1.3609e-01,
6.4043e-02, -1.0253e-02, -6.2813e-04,
4.6828e-02, -3.9619e-01, -9.2633e-03,
-8.1752e-02, 9.9083e-02, 4.4296e-03,
7.1594e-02, 3.9860e-02, 8.1088e-02,
1.7750e-01, -1.2381e-01, 1.4476e-01,
2.3416e-02, 1.2819e-01, 1.0816e-02,
5.5296e-02, 5.5199e-02, -2.1253e-02,
1.7214e-01, 2.0542e-01, -3.7859e-03,
1.2831e-01, 3.2087e-02, -5.1851e-02,
-2.3686e-02, 1.2271e-01, -1.6009e-02,
-2.0176e-01, 7.4757e-01, -3.4526e-02,
-4.7055e-02, -3.7099e-01, -1.9216e-01,
-8.8030e-02, -2.5853e-02, -1.7087e-02,
-2.0533e-01, 1.5214e-01, -1.8639e-03,
-1.1236e-01, -2.4612e-01, 6.3094e-02,
2.3829e-02, -5.0078e-03, 5.3854e-02,
-9.6934e-03, 3.7047e-02, 4.7325e-01,
5.6975e-03, -8.6108e-02, 6.5569e-02,
-3.9768e-03, 2.0580e-02, -4.1931e-02,
6.9577e-02, -1.0416e-01, -2.5037e-03,
-1.9198e-02, 6.2027e-02, -1.0833e-02
}
,
{
-5.3430e-40, 2.5717e-41, 5.7504e-40,
7.1679e-41, 6.2076e-40, -8.4201e-41,
-4.2111e-40, 3.4851e-40, 1.3009e-40,
3.3016e-40, -7.6473e-41, -1.8392e-40,
2.2773e-41, 1.2087e-40, 1.1565e-40,
6.5190e-41, 2.0075e-40, 2.5796e-40,
5.0575e-40, -2.6261e-40, -2.5486e-40,
-3.9886e-40, -6.0644e-40, 2.9264e-40,
8.9627e-41, -3.0550e-40, -2.3456e-40,
-4.8855e-40, -4.8867e-40, -5.0492e-40,
-1.0706e-40, 5.3827e-40, -1.6413e-40,
1.4714e-40, -3.4024e-40, -4.4881e-40,
3.2361e-40, 2.0858e-40, 3.8836e-40,
2.0949e-40, 5.9633e-40, -1.7878e-41,
-4.1980e-40, -4.4383e-40, 2.7859e-40,
7.0317e-42, -8.9973e-41, 5.8700e-41,
1.8411e-40, -3.6097e-42, 2.7362e-40,
5.4341e-40, 6.0305e-40, 5.9004e-40,
5.2692e-40, -6.3449e-41, 1.2075e-40,
7.5297e-41, 8.9267e-41, 4.9139e-40,
-1.4609e-40, 3.1821e-41, 2.3288e-40,
3.1748e-41, -3.8052e-40, -2.4322e-40,
-5.7959e-40, 6.1966e-40, 3.4964e-40,
-5.6776e-40, -6.8327e-41, -3.3777e-41,
-5.9108e-02, 3.5468e-02, -2.8772e-02,
6.8602e-01, 1.4232e-01, 1.1954e-02,
-3.8234e-02, 7.1837e-02, -1.8832e-02,
4.7972e-02, 1.1623e-02, -2.1687e-03,
-4.9744e-01, 2.7751e-01, 1.7862e-02,
7.4286e-02, 3.1309e-03, 1.1030e-03,
-6.1084e-01, -8.5679e-03, 9.4956e-03,
-4.5246e-01, -1.2126e-01, -3.7368e-02,
2.5624e-02, 1.2087e-02, -1.5431e-02,
6.0313e-40, 1.8404e-40, -7.2006e-41,
6.0697e-40, -9.1199e-41, 5.8965e-40,
5.4830e-40, 1.3014e-40, 1.5585e-41,
-3.6027e-02, -6.3004e-03, 1.5237e-02,
6.0743e-01, 9.2523e-02, -4.7370e-03,
3.4407e-02, -8.3823e-02, 1.6898e-02,
5.7527e-40, -5.0621e-40, -2.9035e-42,
3.8199e-40, -2.2913e-40, -5.0895e-40,
4.0079e-40, 5.1744e-40, -3.3006e-40,
6.1448e-40, 1.2347e-40, -3.1673e-40,
7.3214e-41, 5.2143e-40, -2.6071e-40,
1.6109e-40, -2.0298e-40, 9.5817e-41,
6.9876e-02, -2.9290e-02, 3.2294e-03,
-4.2632e-01, 1.5789e-01, 3.6809e-02,
2.1220e-02, 1.6531e-04, 6.8502e-03,
-6.5221e-02, 8.8059e-02, 5.7934e-03,
-1.7280e-01, 1.5303e-01, 1.7663e-01,
-1.2908e-01, -1.1749e-01, 5.7887e-02,
1.0685e-01, 2.2763e-01, 3.3796e-02,
1.7629e-01, 3.8882e-01, 6.3540e-02,
6.4707e-02, 1.0046e-01, -8.1911e-02,
-3.9718e-03, 4.6416e-02, 4.7357e-02,
7.3694e-02, -1.6444e-01, 2.4784e-02,
-3.0808e-03, 2.7399e-02, -2.9216e-04,
2.4428e-40, -3.0160e-40, 2.3184e-40,
-4.9114e-40, 5.6685e-40, -3.6020e-40,
2.2618e-40, -2.8145e-40, 2.1149e-40,
2.3559e-02, -8.6949e-02, -3.8350e-02,
-2.9547e-01, 7.0187e-01, -8.3979e-02,
-2.8576e-02, -1.6538e-01, -5.2465e-02,
-1.6016e-40, -1.4760e-40, -2.1977e-40,
4.3180e-40, 4.1724e-40, -1.2969e-40,
-1.3023e-40, -1.0095e-40, -1.5965e-40,
-4.0721e-40, -4.1747e-40, -4.3706e-40,
-4.2838e-40, -4.5507e-40, -4.6023e-40,
-3.7435e-40, -3.9889e-40, -4.2249e-40,
-1.2429e-01, -3.5062e-01, -1.1418e-01,
-4.0787e-02, 6.1690e-01, -1.0085e-01,
1.6098e-02, 8.5100e-02, -1.1621e-02,
3.0709e-40, -4.4880e-40, -2.7530e-41,
-1.2649e-40, -5.3936e-40, 5.0995e-41,
4.4003e-40, -2.1211e-40, -6.6422e-43,
-1.8989e-40, -3.6631e-40, 4.1392e-40,
-3.9057e-40, -5.5599e-40, 6.9979e-41,
3.8983e-40, 5.6737e-41, 2.3997e-40,
-9.4862e-41, 2.4256e-40, -3.7040e-40,
1.6374e-40, 3.5439e-42, -1.0385e-40,
3.6145e-40, -2.4342e-41, -3.0115e-40,
-6.0009e-40, -5.2386e-41, -1.2504e-40,
2.9237e-40, -1.2290e-40, -1.1502e-40,
-3.5887e-40, -6.1810e-40, -1.6289e-41,
2.5438e-41, 5.1229e-40, -2.4915e-40,
1.3516e-40, 3.3553e-40, 8.5831e-41,
-8.5122e-41, 3.7625e-41, 2.5507e-40,
-1.5828e-40, 2.1991e-40, -1.5628e-40,
-5.3110e-40, 5.1395e-40, -5.8162e-40,
-3.1571e-40, -5.5139e-40, 1.2299e-40,
4.8855e-40, -9.3940e-41, -6.2534e-40,
-3.3275e-40, -2.4982e-40, -1.2956e-40,
-6.0047e-40, -1.8712e-41, -7.3274e-42,
-2.8519e-40, 3.5541e-40, 2.4485e-40,
-8.1435e-41, -2.7091e-40, 7.1206e-41,
-5.9519e-41, -2.5552e-40, -3.6189e-40,
7.7038e-02, -1.6317e-02, -2.4118e-02,
-4.3086e-02, -2.1512e-01, 1.2288e-01,
1.8237e-01, -1.5438e-01, -1.1346e-01,
-4.6141e-02, -4.0750e-02, -5.6414e-04,
-1.5640e-01, -3.4506e-01, -1.4441e-02,
-2.0278e-01, -3.1403e-01, -6.2542e-02,
-1.9622e-02, 1.6348e-02, 6.9859e-03,
-9.3142e-02, 1.0368e-02, -5.6585e-02,
8.4213e-02, 1.0776e-01, -1.0315e-01,
8.7873e-41, -5.3947e-40, 1.1714e-40,
7.5534e-41, -1.1871e-40, -5.4012e-40,
3.8269e-41, -1.4913e-40, -3.1802e-40,
-3.4707e-02, 1.2518e-02, 9.4679e-03,
1.2254e-01, 1.9394e-01, 2.6530e-02,
2.2413e-01, -1.6298e-01, -6.1446e-02,
-1.1042e-42, -2.7255e-40, -5.5067e-40,
3.8272e-40, 4.9956e-40, -3.2074e-41,
2.8351e-40, 4.2501e-40, 3.9389e-41,
6.1941e-40, -4.8790e-40, -3.4137e-40,
2.2577e-40, -5.7183e-40, -8.6861e-41,
5.7021e-40, -3.2349e-40, 1.9655e-40,
9.1180e-02, 5.6665e-02, -6.5437e-04,
1.1759e-01, 2.7517e-01, 1.9143e-01,
9.7905e-02, 6.6707e-02, 8.6535e-02,
8.8717e-03, 3.0913e-02, 6.6909e-03,
-8.1791e-02, -4.7883e-01, 7.4920e-02,
4.5843e-01, -1.0410e-01, 1.6655e-01,
-4.7094e-03, 3.4769e-02, -1.3291e-02,
-8.5570e-03, -4.0038e-01, 1.8418e-01,
-1.4696e-01, 3.2279e-01, 2.5712e-02,
-2.6207e-01, -4.6150e-02, -6.4099e-02,
-3.2623e-01, -1.8984e-01, -5.7891e-02,
-2.2088e-01, -4.2042e-02, -2.5307e-02,
1.0260e-40, 5.0443e-40, 7.5150e-41,
1.4402e-40, -5.1952e-40, -5.3810e-40,
6.2240e-40, 1.8661e-40, -8.2983e-41,
7.1850e-02, 4.8770e-02, -1.5081e-02,
4.8072e-01, 2.5477e-01, 3.8197e-02,
2.6011e-01, 2.4610e-01, -3.6167e-02,
3.8901e-40, 1.6760e-41, 2.8471e-40,
3.1983e-40, 1.2460e-40, -4.3961e-40,
3.9187e-40, 2.7818e-40, -9.1501e-41,
-2.3320e-40, -1.9998e-40, -2.8132e-40,
-2.9552e-40, -3.9643e-40, -5.1375e-40,
-1.6686e-40, -5.3138e-40, -2.6988e-40,
2.5623e-02, 2.6942e-02, 2.4342e-02,
-9.9084e-02, 5.2974e-01, -6.7983e-02,
-2.2454e-01, 1.1507e-01, 2.0364e-02,
3.4852e-01, -3.1091e-01, 8.1154e-02,
-3.2205e-01, 1.7103e-01, 2.4162e-01,
-2.6892e-03, 2.4142e-02, 5.5540e-02,
-4.5753e-02, -5.0097e-01, 1.7503e-01,
1.4058e-01, 1.1311e-01, 1.5945e-01,
-5.3975e-02, 5.2326e-02, -6.2382e-02,
9.4114e-02, -5.6812e-01, -1.2081e-01,
-8.5809e-02, -9.8661e-03, -2.3064e-02,
-1.6453e-03, -1.8328e-02, 2.4282e-03,
1.5943e-40, 4.6894e-40, -6.2730e-40,
3.8054e-40, -3.7914e-41, -1.4429e-40,
1.6925e-40, 5.1566e-41, -1.7909e-40,
-3.7920e-02, 2.4698e-01, 5.0019e-02,
-1.4246e-02, 2.8739e-01, -5.4704e-02,
7.9436e-02, -2.7838e-02, -3.4191e-02,
-3.3565e-40, 2.1368e-40, 6.7346e-42,
5.6681e-40, -5.5776e-40, -2.7705e-40,
-2.2966e-40, 1.1692e-40, -2.5187e-40,
4.4806e-40, -4.8424e-40, -9.1436e-41,
-4.3250e-40, -2.0721e-40, -2.0050e-40,
-5.1061e-40, 2.6405e-40, -3.0913e-40,
-1.2078e-01, 3.1948e-01, 1.0082e-02,
-1.0781e-02, 8.0720e-02, -4.6330e-02,
-1.8084e-02, -2.2846e-02, -5.5861e-03,
-3.2400e-02, -1.7329e-01, -2.7995e-02,
-5.3680e-02, 4.1310e-01, -9.4691e-02,
7.6938e-02, -4.9596e-02, 1.9649e-01,
3.2594e-02, 1.1544e-01, -1.8501e-02,
7.0248e-02, -6.9838e-02, -5.4278e-02,
-2.9317e-02, -1.4890e-01, 7.8661e-02,
3.7685e-02, 5.9594e-02, 8.9527e-02,
2.2957e-01, -2.9681e-01, -1.6329e-01,
-1.3206e-01, -4.3808e-02, 3.8854e-02,
1.7529e-40, -3.8429e-41, 1.4443e-40,
-4.0829e-40, -2.5643e-40, -5.4821e-40,
1.6827e-40, -1.1628e-40, 2.2441e-40,
5.2451e-02, 1.0179e-01, 4.8487e-02,
-2.1020e-01, -4.4345e-01, -8.7642e-02,
7.0958e-02, 1.9934e-01, -2.1090e-02,
-3.0795e-41, 2.7921e-40, 2.8491e-40,
-2.1154e-40, 9.8876e-41, -8.8824e-41,
2.6552e-40, 2.5767e-40, -3.8369e-40,
6.1348e-40, -3.4170e-40, -1.7109e-40,
-3.3080e-40, 5.4199e-41, -1.7512e-40,
1.8363e-40, -4.4080e-40, -2.5508e-40,
-4.0716e-02, -2.8531e-01, 3.9981e-02,
2.2278e-02, 5.6661e-01, -8.3890e-02,
-7.7331e-02, -9.3843e-02, 1.5584e-02
}
,
{
-3.6751e-40, -5.4562e-41, 6.1860e-40,
8.9003e-41, 5.5262e-40, 3.9537e-40,
-2.1258e-42, -3.1069e-40, -7.6225e-41,
-1.2220e-02, -8.6886e-02, 1.0714e-02,
1.1656e-02, -7.3635e-02, 5.9427e-02,
4.8518e-03, 1.3543e-01, 1.4668e-02,
-1.7505e-02, -2.0691e-02, -1.4507e-02,
2.6157e-02, 7.4109e-02, 1.2822e-02,
-1.9737e-02, -4.9281e-02, 8.5962e-03,
5.6236e-40, 2.4616e-40, 1.6384e-40,
-3.9469e-40, -1.7094e-40, 1.9285e-40,
-1.3634e-40, -1.5785e-40, 6.4184e-41,
-1.2752e-02, 2.3150e-02, -5.3355e-03,
-5.9667e-02, -3.9580e-01, -7.0033e-02,
-2.2612e-02, 1.9176e-02, 1.0588e-02,
8.0027e-04, 3.2242e-01, -2.2566e-02,
8.7850e-03, -2.4025e-01, 4.6123e-02,
-1.9038e-02, -8.5750e-03, -4.8153e-03,
-1.3049e-03, -5.7771e-03, 9.6437e-03,
3.2477e-02, 2.4482e-01, 4.0580e-02,
1.3194e-02, -4.6602e-01, -6.6163e-02,
-1.0647e-01, 7.3328e-02, 2.5871e-02,
-7.0883e-02, -9.2725e-02, -1.5185e-02,
1.1804e-02, 1.7784e-03, -4.4099e-03,
-4.9226e-40, -1.3081e-40, -3.5969e-40,
4.3539e-40, -2.9631e-40, 2.3531e-41,
5.6191e-40, 6.1545e-41, -1.1112e-40,
-1.1880e-02, -3.1884e-02, -2.0850e-02,
-6.8633e-03, 1.6422e-01, 1.0281e+00,
3.5887e-03, 2.1180e-01, -1.0094e-01,
-1.5103e-02, -4.9074e-02, -1.7702e-02,
7.2119e-02, 3.3199e-02, -9.7082e-04,
5.5383e-02, 1.0343e-01, 2.5156e-02,
2.9049e-40, -1.6397e-40, -8.8848e-41,
-6.2827e-40, 8.1281e-41, 5.2909e-40,
-4.1132e-40, 1.5751e-40, 1.5400e-40,
-7.3765e-02, -4.9723e-02, 4.9357e-02,
-2.4207e-02, -1.0291e-01, -1.4001e-03,
-1.2751e-02, 4.2805e-03, 1.8934e-03,
2.6862e-02, 1.1634e-01, 4.5666e-02,
-4.7351e-03, -4.1593e-01, 3.6082e-02,
1.1446e-02, -5.2026e-03, 1.8672e-02,
-7.0960e-04, -6.7877e-03, 9.6674e-03,
-4.9952e-03, 8.8664e-02, -2.7707e-02,
8.5309e-02, 5.5513e-02, -7.6230e-02,
3.6354e-02, 9.7794e-02, 1.1687e-02,
2.6847e-02, 3.2565e-01, -8.7710e-03,
-2.0372e-02, -1.9090e-02, -3.2566e-03,
-5.5592e-40, 7.4408e-41, 3.5576e-40,
2.7758e-40, 4.5458e-41, -6.2347e-40,
9.9739e-41, -1.6078e-40, -5.2900e-40,
1.1500e-02, -3.0675e-01, -3.0079e-02,
1.5080e-02, -2.4292e-01, 1.2736e-01,
-1.9513e-02, -1.9376e-02, -8.5960e-02,
-1.0241e-01, -2.1312e-02, -3.1999e-02,
-6.3598e-02, 1.5187e-01, 1.2279e-01,
1.5695e-03, 1.1376e-01, 5.2648e-03,
2.6415e-40, 3.0508e-40, 3.6407e-41,
-1.4403e-40, 2.8942e-40, -1.0089e-40,
2.2362e-41, 1.9843e-40, -1.5509e-40,
1.3269e-01, -3.1031e-01, -4.4091e-02,
4.6385e-03, 2.1411e-02, 5.7141e-02,
2.0724e-02, -3.5406e-02, 2.5717e-03,
-5.5922e-02, 7.1404e-01, -2.9852e-02,
1.3041e-02, 3.9373e-02, -2.4515e-01,
4.4278e-03, 2.1557e-02, -8.4940e-03,
1.3677e-02, -3.5183e-02, 1.2391e-02,
-9.2405e-02, 2.9650e-01, 6.9695e-02,
-3.3125e-02, 3.4700e-01, 1.4552e-01,
2.7357e-02, 5.2133e-01, -5.7571e-02,
2.7580e-02, 1.0381e-01, 1.3678e-02,
4.9260e-03, -4.4419e-02, 7.0651e-04,
2.9472e-40, -5.2892e-40, -3.6567e-40,
4.9403e-40, -6.2132e-40, -6.2920e-40,
-1.5156e-40, -3.6134e-40, 5.2432e-40,
-5.0427e-03, -2.8247e-03, -5.3734e-02,
-1.5918e-02, 1.8325e-01, -1.7834e-01,
-5.1774e-03, 8.0009e-02, 5.6296e-03,
3.1480e-02, 2.0665e-02, 2.7806e-04,
7.3085e-02, 7.7660e-01, 1.1979e-01,
1.9979e-02, 1.6629e-01, 2.3216e-02,
-5.9701e-40, 9.5583e-41, 1.8231e-40,
-3.3216e-40, -4.1253e-40, -3.3326e-40,
1.7131e-40, 2.9588e-40, -2.2520e-40,
-1.3337e-01, -4.2777e-01, -1.3569e-01,
2.9915e-02, -2.7016e-01, -3.7454e-03,
-1.3574e-02, -3.6298e-02, -1.6571e-02,
4.2530e-02, -4.2299e-02, 1.4320e-01,
1.4371e-02, -1.1289e-01, -3.8829e-02,
5.1689e-03, 1.5804e-02, 1.6125e-03,
-3.4601e-03, -7.2087e-03, -5.5514e-04,
4.4568e-02, 1.3621e-01, -4.3811e-02,
1.1350e-02, -2.8417e-01, 3.1553e-02,
-7.8854e-02, -2.0316e-01, 7.7746e-03,
-1.1437e-02, 2.1557e-01, -1.9479e-02,
-1.3511e-02, -2.0339e-02, -1.0276e-02,
-8.8977e-41, 5.9533e-40, -3.1413e-40,
-3.1892e-40, 5.5204e-40, -5.0634e-40,
-2.4932e-41, 4.3474e-41, 6.2961e-40,
4.7864e-03, 5.7125e-02, -1.5468e-02,
-3.9614e-03, -2.9042e-02, 2.8347e-01,
-1.0133e-02, 8.2745e-02, -1.0450e-01,
5.9537e-03, 1.4050e-02, 1.9802e-04,
2.4964e-02, 1.3077e-01, -4.7314e-02,
6.2744e-03, -1.9068e-01, 5.2593e-02,
-2.0550e-40, -2.4231e-40, 3.3927e-40,
-3.9609e-41, 2.2262e-40, 1.8866e-40,
2.0788e-40, -1.8012e-40, -1.9375e-40,
-4.7530e-03, -1.2315e-01, 8.2373e-03,
-9.2412e-02, 1.7156e-01, 1.1176e-02,
-1.4081e-02, 1.4694e-02, -1.9475e-02,
-1.5269e-02, -3.8430e-02, -7.4717e-02,
3.3361e-02, -1.1956e-01, 4.2304e-01,
-2.9924e-03, -3.3035e-02, -3.6560e-02,
-1.2386e-02, 6.3762e-03, -3.7047e-02,
1.3839e-02, -3.6358e-02, 4.3609e-02,
-8.3692e-03, 4.5794e-01, -3.0761e-01,
2.2287e-02, 2.5360e-02, -6.1253e-03,
-1.8992e-02, -4.0078e-01, 7.3821e-02,
5.6517e-03, 4.2348e-02, -2.5642e-02,
5.5659e-40, -6.1219e-40, 4.1493e-40,
5.7719e-42, -3.7181e-40, -3.3260e-40,
-4.8241e-41, 5.2207e-40, -1.2199e-40,
-1.2074e-02, 1.7647e-01, 1.1882e-02,
6.4764e-03, -2.3742e-01, -1.8033e-01,
2.5866e-02, 6.5985e-02, 3.7191e-02,
5.1047e-02, -3.0457e-02, 1.2531e-02,
-1.3252e-01, 1.2593e-01, -6.3717e-02,
4.0794e-02, -1.4786e-02, 1.7139e-02,
2.4343e-40, -1.7451e-40, 2.0169e-40,
-5.5166e-40, 2.4201e-40, -2.5701e-40,
2.9947e-40, 2.9321e-40, -1.6015e-40,
-3.6598e-02, -1.8520e-03, -1.6999e-01,
-8.6806e-02, -7.7266e-02, -9.6042e-02,
-2.1342e-02, 2.5793e-02, -7.2541e-03,
3.0667e-02, -2.6287e-01, 3.0592e-02,
-4.5559e-02, -1.4716e-01, 2.0932e-01,
-5.8472e-03, -1.0023e-02, 1.2134e-02,
-1.3284e-02, 2.0538e-02, -5.4476e-04,
5.8096e-02, -1.4790e-02, -2.0158e-02,
-3.9654e-02, -2.2069e-01, -1.5089e-01,
-1.8966e-01, -1.6834e-01, 9.8934e-02,
8.2326e-02, 7.5585e-02, -1.7188e-02,
-1.4985e-02, 2.1823e-02, -7.7015e-03,
1.8353e-40, 4.8298e-40, -2.0568e-40,
-3.7196e-40, -5.7237e-40, 1.0648e-40,
9.4960e-41, 3.0411e-40, 1.3294e-40,
-1.4884e-02, 4.9767e-02, -3.0288e-02,
8.9874e-03, -1.0290e-01, 3.1344e-01,
5.9735e-03, -2.0813e-01, -6.6145e-03,
1.6592e-02, 3.0529e-05, -1.0180e-02,
-4.8683e-02, 1.4025e-01, 2.9237e-02,
-2.3334e-02, -9.6638e-02, -1.0268e-02,
-4.9497e-41, -5.6377e-40, -2.0142e-40,
2.1230e-40, 1.6067e-40, 3.4830e-40,
-4.9031e-40, -3.0290e-40, -2.9060e-40,
3.4053e-02, -8.9560e-02, -4.4479e-02,
4.2128e-02, 6.9253e-02, -7.1096e-03,
4.2358e-02, -1.7215e-02, 9.0389e-03,
1.8129e-02, -1.4785e-01, 1.1267e-01,
-7.1637e-02, 5.5595e-01, -1.0569e-02,
1.8481e-02, -4.7556e-02, -1.1185e-02,
-1.1766e-02, -8.5959e-03, -3.0046e-02,
-2.1081e-03, 1.1518e-01, -8.4419e-02,
-7.5829e-02, 1.8199e-01, -9.7726e-03,
3.6473e-02, 1.8761e-01, 4.9495e-03,
-6.9640e-02, -2.8775e-01, 3.6149e-02,
9.6345e-04, 1.3967e-02, -6.0015e-03,
2.9861e-40, 3.9190e-40, 5.3741e-40,
3.8059e-40, 4.7113e-40, 5.9498e-40,
-5.0640e-40, -4.1610e-40, 6.2009e-40,
-2.3464e-03, -7.3888e-02, 3.4701e-02,
-5.2257e-04, 3.8444e-02, -5.3735e-01,
-1.7970e-03, 9.0298e-02, 5.3151e-02,
-2.6033e-02, 1.2973e-02, 4.9147e-03,
2.3005e-02, 1.7045e-01, 2.4715e-02,
2.7981e-02, -8.4662e-02, -9.4778e-03,
5.3019e-40, -2.1800e-40, 1.5281e-40,
-1.0282e-40, 1.8040e-41, 1.3929e-40,
-5.9679e-40, -5.2958e-40, 1.4429e-40,
3.4325e-02, -1.7240e-01, -4.9645e-02,
-2.4341e-02, 5.2652e-02, -1.1188e-02,
-3.6336e-03, 4.2148e-04, 3.3086e-03,
5.5059e-03, 1.7744e-01, -2.8681e-02,
-3.4868e-03, -1.4569e-01, 1.6508e-02,
4.6766e-03, -1.7963e-02, -2.6397e-03,
4.3618e-03, -4.2793e-03, -4.7820e-04,
-4.2795e-02, 2.0070e-01, 3.8402e-02,
5.0586e-02, 2.1910e-01, -3.4381e-02,
5.7625e-02, 4.2314e-01, -1.9732e-02,
3.4811e-02, -2.3033e-01, 1.1477e-02,
-7.3744e-03, 1.9112e-02, 4.2251e-03
}
};
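// HDNL0biasL: per-layer bias terms, presumably one 8-element vector for each
// of the 8 convolution layers whose weights are stored in HDNL0kernelsL above.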
static __device__ __constant__ const float HDNL0biasL[8][8] =
{
{
0.0272, -0.5743, -0.0333, -0.0334, 0.0082, -0.0263, -0.0048, -0.0167
}
,
{
-0.0239, -0.0385, 0.0026, 0.0288, -0.0225, 0.0082, -0.0191, -0.0185
}
,
{
-5.8305e-03, -8.6574e-02, 4.2228e-02, -4.3500e-02, -8.1892e-04, 3.3171e-03, -1.1582e-02, -4.1205e-40
}
,
{
-0.0053, 0.0053, -0.0114, -0.0127, -0.0039, -0.0426, 0.0053, -0.0017
}
,
{
-0.0046, -0.0104, -0.0087, -0.0040, 0.1077, 0.0347, -0.0165, 0.7296
}
,
{
8.7612e-02, 5.9126e-01, 4.6709e-03, -1.1559e-39, 2.3381e-02, -1.2136e-40, -5.6040e-39, 3.7100e-02
}
,
{
-3.3246e-39, -1.4536e-02, -6.3362e-02, 8.5347e-41, 7.9956e-02, 3.0679e-04, -1.0257e-02, -1.2037e-02
}
,
{
-0.0006, 0.0117, 0.0083, 0.0686, -0.0046, 0.0015, -0.0076, 0.0079
}
};
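// HDNL0kernelsL10: weights of the final layer, presumably a 1x1 convolution
// mapping the 8 feature channels down to 4 outputs (4 * 8 values in total).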
static __device__ __constant__ const float HDNL0kernelsL10[4 * 8] =
{
0.4908, -0.0457,
-0.1716, -0.2115,
-0.0015, -0.3152,
0.3045, 0.0330,
-0.2981, 0.0912,
0.0122, 0.2281,
0.3331, 0.2853,
0.2210, 0.2611,
0.2364, 0.0792,
0.2885, -0.7122,
-0.3715, 0.1404,
-0.0260, 0.2144,
0.2378, 0.1570,
-0.5734, 0.2077,
-0.0851, 0.2771,
0.0415, -0.1858
};
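// Hypothetical illustration (not part of the generated weights): one way a
// 4x8 output-layer table such as HDNL0kernelsL10 could be applied per pixel.
// The row-major layout [out * 8 + in] is an assumption made for this sketch
// only; the real indexing is defined by the kernels that consume the table.
static __device__ inline void HDNL0ApplyL10Sketch(const float feat[8], float out[4])
{
    for (int o = 0; o < 4; ++o)
    {
        float acc = 0.0f;
        for (int i = 0; i < 8; ++i)
            acc += HDNL0kernelsL10[o * 8 + i] * feat[i]; // 1x1 conv: dot product per output channel
        out[o] = acc;
    }
}
// HDNL1kernelsL1: first-layer weights for the HDNL1 model, presumably eight
// 3x3 filters (9 taps each) applied to a single input channel.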
static __device__ __constant__ const float HDNL1kernelsL1[9 * 8] =
{
-6.6326e-02, -2.2316e-01, 4.2471e-02,
1.7064e-02, -6.8305e-01, -1.5978e-01,
6.7568e-01, 3.2212e-01, 8.3561e-02,
-4.6649e-01, -6.8789e-02, 5.3455e-01,
-5.0941e-01, 7.0657e-02, 4.5647e-01,
-2.3657e-02, 3.5302e-02, -1.8316e-02,
-2.0316e-01, 4.7021e-02, -2.2313e-01,
5.3465e-02, 7.0750e-01, 9.1366e-02,
-2.8566e-01, -2.0521e-02, -7.1786e-02,
4.8186e-02, -9.3429e-02, 2.4493e-03,
3.4654e-01, 7.2625e-02, 1.6615e-01,
3.2101e-01, 3.2923e-01, -9.8548e-02,
1.1916e-02, 2.0413e-01, -1.8920e-02,
6.0858e-02, 8.3548e-01, 1.4060e-01,
-9.1827e-01, -2.4551e-01, -4.6118e-02,
-5.2737e-02, 4.3151e-01, 1.7027e-01,
2.6647e-01, 5.5240e-01, 3.4745e-03,
5.3495e-02, -4.7059e-02, -2.6593e-02,
1.5691e-01, 4.7332e-01, 2.6651e-03,
1.7997e-02, 4.1367e-01, 1.3239e-02,
4.6932e-02, 1.0278e-01, 1.0699e-02,
-3.4319e-02, -7.6373e-01, -9.7022e-02,
-1.4160e-01, 2.9567e-01, 6.6220e-01,
7.3508e-05, 1.2683e-01, -6.3442e-02
};
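// HDNL1biasL1: bias terms for the first HDNL1 layer, one per output channel.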
static __device__ __constant__ const float HDNL1biasL1[8] =
{
-0.0264, -0.0229, -0.3021, -0.2579, -0.0327, -0.0053, -0.7777, 0.0232
};
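// HDNL1kernelsL: weights for the remaining HDNL1 layers, presumably eight
// layers of 3x3 convolutions over 8 input and 8 output channels (9 * 8 * 8
// values per layer).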
static __device__ __constant__ const float HDNL1kernelsL[8][9 * 8 * 8] =
{
{
-7.8588e-41, -5.0770e-40, -2.3334e-40,
5.7174e-40, 6.9060e-41, 2.2264e-40,
-4.1631e-40, 4.5667e-40, -1.8115e-40,
-3.1000e-40, 3.1019e-40, 5.5423e-40,
-5.8518e-40, 2.1290e-40, -5.4579e-40,
-3.7753e-40, 3.6029e-40, -1.7875e-40,
4.2296e-40, 6.5672e-41, 1.4976e-40,
-3.1479e-40, -3.2881e-40, -5.9818e-40,
3.2053e-40, 3.0821e-40, 5.1321e-40,
-2.6557e-17, -3.8205e-17, -3.7077e-17,
-2.5168e-17, -3.4817e-17, -3.4186e-17,
-1.8056e-17, -2.3105e-17, -2.2581e-17,
5.9355e-40, 2.4052e-40, -1.0027e-40,
2.2060e-40, 3.4864e-40, -5.7403e-40,
4.6936e-40, -3.3951e-40, -4.7715e-40,
-9.7917e-11, -1.0331e-10, -9.6141e-11,
-1.0581e-10, -1.1173e-10, -1.0317e-10,
-1.0192e-10, -1.0681e-10, -9.8738e-11,
-1.0402e-29, -2.3233e-29, -1.7882e-29,
-1.4804e-29, -3.7821e-29, -3.0750e-29,
-1.0448e-29, -2.6740e-29, -2.1676e-29,
4.2124e-40, 2.5024e-40, 4.5312e-40,
-2.4880e-40, 2.9838e-41, -2.7215e-41,
-2.6347e-40, 1.5950e-40, 9.3734e-41,
-1.4936e-01, -1.0438e-01, 2.9827e-02,
1.4751e-02, -1.6854e-01, -8.8101e-02,
4.9228e-02, -3.0744e-02, -1.1512e-01,
-3.4996e-02, -2.5024e-02, -1.8880e-02,
3.0008e-02, 4.8689e-02, -1.3415e-01,
-9.1698e-03, -1.1019e-02, -5.0655e-02,
-6.6579e-02, -2.6447e-02, 1.9791e-02,
-4.1727e-02, 3.6433e-02, 3.1516e-02,
-5.7619e-02, 2.3401e-02, 3.0785e-02,
-3.3610e-02, 1.2263e-01, 2.4351e-02,
1.7148e-02, 1.7144e-01, 4.0305e-02,
8.7902e-03, -7.0077e-02, -1.0688e-01,
4.7460e-02, -1.4093e-03, -1.5911e-02,
-2.2978e-02, 9.9025e-02, 1.2867e-02,
3.4704e-02, 1.4672e-01, 7.9188e-02,
-4.4222e-02, -3.9480e-02, -1.9193e-01,
-3.1897e-02, 1.0776e-01, -5.2742e-02,
8.0377e-02, 2.5764e-01, -9.7330e-02,
-1.1593e-01, -5.3753e-02, -2.8918e-02,
6.7939e-02, 2.3963e-01, 2.0856e-01,
2.7964e-02, 2.7781e-01, 2.1859e-01,
-1.5196e-02, 9.6704e-03, -8.0136e-02,
8.9441e-02, 1.0314e-01, -2.0204e-02,
-3.3970e-02, -1.4562e-02, 3.4723e-02,
2.3357e-40, -1.4361e-40, 2.0498e-40,
-5.2355e-40, -6.0151e-40, -2.9264e-40,
1.9715e-41, 5.9793e-41, -1.3675e-40,
5.3771e-40, 6.5637e-41, -3.8471e-40,
-3.0820e-40, -1.7004e-40, -1.9371e-40,
-5.1159e-40, 7.3244e-41, 3.5861e-41,
2.8441e-40, 4.5248e-41, 1.9771e-40,
-2.4681e-40, 3.6054e-40, 3.3496e-40,
-6.5048e-42, -1.6001e-40, 4.8243e-41,
-1.0165e-08, -9.9140e-09, -9.6054e-09,
-1.0511e-08, -1.0256e-08, -9.9066e-09,
-1.0521e-08, -1.0320e-08, -9.9896e-09,
2.6042e-40, 4.2016e-40, 5.3537e-40,
1.4594e-40, 1.1344e-40, 3.5144e-40,
-2.5736e-37, -1.3591e-39, 2.1029e-40,
-3.1420e-07, -3.0309e-07, -2.9630e-07,
-3.1196e-07, -2.9967e-07, -2.9249e-07,
-3.1296e-07, -3.0086e-07, -2.9332e-07,
-6.1256e-12, -5.9283e-12, -5.6508e-12,
-6.5297e-12, -6.4118e-12, -6.0667e-12,
-6.8382e-12, -6.8547e-12, -6.5225e-12,
-5.0327e-26, -1.0795e-25, -1.8952e-25,
-2.4220e-26, -5.9067e-26, -1.1323e-25,
-2.1499e-27, -5.5342e-27, -1.0333e-26,
4.5039e-03, -1.3303e-02, 1.6183e-01,
6.5951e-02, -7.1353e-02, 1.7254e-01,
-1.8671e-03, 1.0593e-01, -3.6872e-02,
4.9102e-02, -2.4075e-03, 4.8194e-02,
-7.0892e-02, -1.8948e-01, -1.6586e-01,
-2.8102e-02, 2.0870e-02, 5.9228e-02,
1.2673e-02, 3.3908e-02, 4.8282e-02,
4.4369e-02, 5.6304e-02, 1.2225e-02,
4.1855e-02, 1.1990e-01, 6.3799e-02,
-7.3884e-02, 1.4153e-02, 9.5825e-02,
4.2850e-02, -3.5337e-02, 1.3615e-01,
-2.0900e-01, -2.2835e-02, -8.6987e-02,
-6.7793e-02, 1.3547e-01, -9.9666e-02,
3.5498e-02, 5.3725e-02, 1.1501e-01,
-1.2238e-01, 3.5354e-02, 7.4216e-02,
-3.5288e-02, 7.0111e-03, 2.4820e-02,
-1.0649e-02, 1.6715e-01, 1.2825e-01,
3.1145e-02, 1.2097e-01, -1.2073e-02,
-7.0603e-02, 5.5574e-02, -5.0025e-02,
-8.2885e-02, 1.0957e-01, 1.3311e-01,
2.9147e-02, -1.1849e-02, 8.9953e-02,
-3.2247e-02, -1.0747e-02, 9.1431e-03,
1.2114e-01, -5.9780e-02, 5.4821e-02,
-5.2592e-02, -6.9082e-02, -7.5981e-02,
-7.8533e-02, 1.3658e-01, 1.0923e-01,
-3.2530e-02, -2.1342e-01, -1.2200e-01,
-1.9196e-02, 1.0450e-01, -8.9044e-02,
-2.0110e-02, 6.1439e-02, -2.7405e-02,
6.0823e-02, -6.4268e-03, -9.1778e-03,
6.4877e-02, -6.1227e-02, -5.4466e-02,
9.6375e-02, 1.7519e-01, 5.0725e-03,
1.9159e-01, 3.9725e-01, 1.2851e-01,
-6.9197e-02, 4.9372e-02, -3.4221e-02,
1.1583e-01, 1.3389e-01, 2.9135e-01,
1.0290e-02, 1.1214e-01, 1.7560e-01,
-1.8048e-02, 8.4782e-02, 4.9925e-02,
-3.8447e-02, -1.3156e-01, -1.1072e-01,
1.8256e-01, 2.2831e-01, -1.6508e-01,
4.6781e-02, 1.4913e-01, -8.6956e-02,
5.1365e-04, 6.7873e-02, -3.4787e-03,
1.7689e-01, 1.8414e-01, 2.2286e-01,
1.2571e-01, 1.7687e-01, 1.5949e-01,
5.9904e-02, 1.6259e-01, 1.4313e-01,
2.2234e-01, 4.0943e-01, 3.1469e-01,
1.9799e-01, 4.3052e-01, 3.0510e-01,
1.2259e-01, -1.0778e-02, 6.2284e-03,
1.4508e-02, -6.9073e-02, 5.0998e-02,
5.2962e-02, -1.5291e-01, -1.0491e-02,
-8.6903e-02, -1.0430e-01, 3.0130e-02,
4.1691e-02, -1.2675e-01, -5.5169e-01,
8.9644e-02, 3.6910e-02, -1.5459e-01,
5.3656e-03, 6.7936e-02, 1.0793e-01,
-2.7424e-02, -1.7652e-01, -3.5776e-01,
2.4593e-02, -5.6237e-01, -5.9038e-01,
-9.4807e-02, -7.5681e-02, -3.6990e-02,
8.7385e-03, -5.7989e-02, -4.9573e-02,
-7.7422e-02, -1.1899e-01, -7.4023e-02,
9.1539e-03, -1.1760e-01, 4.6825e-02,
1.9901e-02, -3.9718e-02, 1.2997e-02,
4.2209e-02, -5.2119e-02, -1.2255e-01,
2.4262e-02, 5.3676e-02, -2.4767e-01,
-4.2933e-02, -2.2473e-01, -4.0310e-01,
-3.5160e-02, 1.9858e-01, -1.5943e-01,
1.3208e-01, -1.0493e-01, -6.7076e-02,
-2.5244e-01, 1.1175e-02, 2.5568e-01,
-3.3867e-01, 3.1953e-02, 5.9426e-01,
4.0551e-02, 4.4914e-03, -1.9348e-02,
-6.7386e-02, -1.5543e-01, -3.0883e-02,
8.9177e-02, -4.6432e-02, 6.8227e-02,
8.7784e-02, 3.6127e-02, -2.0375e-02,
4.5461e-02, -4.9071e-02, 9.9435e-02,
-2.5700e-01, -2.7706e-01, 6.2776e-02,
-6.9571e-02, -5.7888e-03, 9.3852e-02,
2.8490e-02, -2.7854e-01, 1.4209e-01,
1.5373e-02, -4.3503e-02, 9.6895e-02,
1.1682e-02, 1.5608e-01, 1.5844e-01,
5.8027e-02, 2.6632e-02, -8.5479e-03,
1.2836e-01, 2.0714e-01, 1.0228e-01,
1.4647e-02, 5.7609e-02, -1.6728e-02,
2.1212e-01, 3.2673e-01, 4.5670e-02,
-6.0844e-02, -1.1768e-01, -1.1233e-01,
5.0123e-04, 6.3947e-02, -1.8356e-01,
1.4091e-01, -2.1568e-02, 8.5933e-02,
-3.9406e-02, 8.2921e-02, -1.0601e-01,
4.1284e-02, -7.3138e-02, 1.7264e-01,
2.5883e-02, 5.2945e-01, 2.4510e-01,
2.7291e-03, 4.0173e-02, 7.8221e-03,
-3.5795e-02, -4.8631e-03, -2.2715e-01,
1.2330e-01, 7.1739e-01, -4.1725e-01,
7.5106e-02, 2.5267e-02, -2.8655e-01,
-7.8731e-02, -7.5747e-03, -5.5601e-02,
7.9764e-02, 1.0524e-01, 8.6742e-03,
2.1791e-02, 3.7304e-02, -1.1534e-01,
-1.2011e-01, -7.5160e-02, 1.3737e-02,
-2.9470e-01, 2.6613e-01, -2.3740e-02,
1.2957e-01, 1.4752e-01, -9.3655e-02,
2.9828e-02, 2.0664e-01, 1.9731e-02,
-8.0378e-02, -3.9481e-01, -1.5395e-01,
-5.7944e-02, -8.6343e-02, -5.4324e-02,
7.1664e-02, 1.5294e-01, -1.2112e-02,
2.1023e-02, 1.1945e-01, -7.2998e-02,
-1.1693e-02, -1.8818e-01, -9.8693e-02,
-6.7017e-02, 6.9767e-02, -5.0268e-02,
-9.1106e-03, 2.4267e-01, 6.0277e-02,
3.5269e-02, 7.7376e-02, 1.6642e-02,
-5.2600e-02, -1.8864e-01, -1.1195e-01,
3.2119e-01, -9.7913e-02, 1.4734e-01,
8.6988e-02, -5.3563e-03, -2.6136e-03,
-9.1528e-03, 2.8186e-01, -1.5933e-01,
4.8499e-02, 4.5189e-01, -1.6399e-01,
5.8164e-02, 6.3251e-02, -2.8738e-02,
2.0424e-01, -7.2819e-02, 2.1903e-02,
-3.5630e-01, 1.3171e-01, -7.6749e-02,
3.8848e-02, 1.7902e-01, -1.1902e-01,
-4.4221e-02, 1.5032e-02, 2.9078e-02,
-1.9738e-01, -1.4878e-02, 1.3315e-02,
1.3956e-02, 1.2856e-01, 7.0688e-02,
2.0933e-01, 1.7286e-01, 6.7601e-02,
5.5136e-01, 4.6866e-01, 1.8402e-01,
2.2362e-01, 2.4124e-01, 1.3167e-01
}
,
{
-5.2308e-12, -5.4024e-12, -5.0039e-12,
-5.4553e-12, -5.6928e-12, -5.2812e-12,
-5.0230e-12, -5.2150e-12, -4.9133e-12,
5.7994e-02, 1.0051e-01, -1.0618e-01,
6.8090e-02, 1.2789e-01, 1.1380e-01,
-1.5882e-01, 8.2323e-03, -9.1424e-02,
2.0132e-07, 2.0907e-07, 2.1344e-07,
2.1179e-07, 2.2018e-07, 2.2381e-07,
2.1095e-07, 2.1920e-07, 2.2150e-07,
2.9336e-02, 5.4427e-02, -1.2082e-01,
5.8399e-02, 2.2261e-01, 1.1165e-01,
-9.6098e-02, 8.3175e-02, -6.5909e-02,
1.2007e-01, 1.9776e-01, 7.7464e-02,
6.7018e-02, 3.6536e-01, 1.3796e-01,
6.0724e-02, 4.6161e-02, 2.3740e-01,
-2.1117e-02, -2.0200e-02, 9.3703e-02,
-4.6932e-02, -1.5910e-01, 8.8094e-02,
-5.6641e-02, -1.7146e-01, -1.0502e-01,
-2.5624e-01, 1.6049e-01, -3.3267e-02,
-2.3248e-01, 5.4036e-01, 1.0027e-01,
-2.1680e-01, -7.0096e-03, -1.0692e-01,
-4.8357e-02, 2.5107e-01, 4.8323e-02,
9.7245e-02, 5.5015e-01, -3.4641e-01,
1.2458e-02, -1.3626e-01, -4.1992e-01,
-2.1359e-40, -1.4250e-40, -4.7123e-40,
-5.9433e-41, 1.9903e-41, -1.7701e-40,
-5.9941e-40, -5.8562e-40, -5.0226e-40,
-2.6581e-40, 1.3006e-40, -1.4201e-40,
5.4264e-40, 2.3848e-40, 5.6412e-40,
-2.6378e-41, -5.7132e-40, -4.1343e-40,
-3.2848e-22, -3.6697e-22, -3.4147e-22,
-3.5780e-22, -3.9435e-22, -3.5989e-22,
-3.1212e-22, -3.4305e-22, -3.0670e-22,
-1.1749e-08, -1.1602e-08, -1.1494e-08,
-1.2125e-08, -1.1918e-08, -1.1718e-08,
-1.1779e-08, -1.1623e-08, -1.1559e-08,
-5.0237e-07, -4.9179e-07, -4.6744e-07,
-5.1967e-07, -5.0826e-07, -4.8421e-07,
-5.0226e-07, -4.9668e-07, -4.8019e-07,
5.6433e-41, -3.0514e-40, -5.4526e-40,
1.1125e-41, 2.9485e-40, 5.5282e-40,
3.0229e-40, 1.5915e-40, 5.3759e-40,
-6.1144e-27, -9.2380e-26, -2.4302e-25,
-9.3834e-25, -1.0289e-23, -1.9513e-23,
-4.3746e-24, -4.4359e-23, -7.0505e-23,
-8.1604e-36, -3.2928e-37, -2.2994e-40,
-3.9543e-37, -9.9513e-39, 7.4616e-41,
-4.0044e-39, 4.4392e-40, 4.8856e-40,
-3.3447e-40, -3.9935e-40, 2.4649e-40,
2.0207e-40, -3.0245e-40, -7.1986e-41,
6.2938e-40, -3.6922e-40, 1.5296e-40,
-6.4982e-41, 5.0849e-41, 5.7873e-40,
1.4327e-40, -4.2163e-40, 1.3807e-40,
2.8569e-40, 1.9139e-40, 3.2985e-40,
-5.4410e-40, 2.3070e-40, 2.1690e-40,
-1.5964e-40, -2.2781e-40, 5.6766e-40,
2.2533e-42, -2.5532e-40, -5.5822e-40,
5.7249e-40, 5.3555e-40, -4.9107e-41,
1.7538e-40, -1.2312e-40, 5.0077e-40,
6.1500e-40, 1.9980e-40, 6.2953e-40,
-7.5314e-23, -9.4299e-23, -7.1342e-23,
-8.5139e-23, -1.1237e-22, -9.0478e-23,
-6.2038e-23, -8.5180e-23, -7.3015e-23,
5.0613e-40, 1.5224e-40, -1.8977e-40,
2.4108e-41, -5.1771e-40, 6.2317e-40,
1.0465e-40, 2.8816e-41, 6.2500e-40,
3.5727e-40, 4.2717e-40, -3.5900e-40,
-4.4831e-40, 3.4260e-40, -4.8293e-40,
-2.4133e-40, 3.1140e-40, -2.0777e-40,
-2.2906e-41, 3.5923e-40, -4.4443e-40,
-4.6615e-40, -2.1123e-40, 4.5700e-40,
-4.6360e-40, -3.6052e-40, -3.4319e-40,
-3.6575e-40, -3.5707e-40, -3.0530e-41,
4.2531e-40, -1.2255e-40, -3.9607e-40,
3.5903e-40, -5.4630e-40, -3.1460e-40,
2.8820e-40, 4.9460e-40, 6.1461e-40,
8.9118e-41, -4.6579e-40, -2.4172e-40,
-5.5474e-40, -8.1848e-41, -1.6910e-40,
-1.6272e-25, -1.8802e-25, -1.7229e-25,
-1.7850e-25, -2.0338e-25, -1.8235e-25,
-1.4715e-25, -1.6733e-25, -1.4681e-25,
-5.5471e-09, -5.6862e-09, -5.7043e-09,
-5.8727e-09, -5.9823e-09, -5.8983e-09,
-5.8040e-09, -5.8670e-09, -5.7388e-09,
-9.7253e-07, -9.7248e-07, -9.4623e-07,
-1.0149e-06, -1.0042e-06, -9.6709e-07,
-1.0139e-06, -9.9930e-07, -9.5295e-07,
-4.5042e-40, 2.6725e-40, 2.3181e-40,
-4.6274e-41, -1.1799e-40, 5.0685e-40,
-1.0765e-40, 3.3322e-40, -6.1905e-40,
-1.3653e-34, -3.4690e-33, -1.1578e-32,
-1.4444e-31, -2.1995e-30, -4.8668e-30,
-1.2965e-30, -2.0189e-29, -3.3962e-29,
-2.5057e-40, 7.2876e-41, 4.5731e-41,
-1.6525e-40, 5.0987e-40, -5.4683e-40,
8.1836e-41, 6.2722e-40, -3.1057e-40,
4.0987e-40, 3.5941e-40, 5.1680e-40,
5.5563e-40, 3.1011e-40, 4.7068e-40,
1.0426e-40, -1.0803e-40, 4.4867e-40,
-4.9675e-03, 1.5412e-01, -4.1930e-03,
-6.1089e-02, 2.0405e-01, 1.9587e-01,
3.8772e-02, 1.6894e-01, -2.6163e-02,
1.0839e-30, 1.8608e-30, 1.1386e-30,
1.4863e-29, 1.9422e-29, 1.1639e-29,
1.7504e-29, 2.2177e-29, 1.3629e-29,
6.4484e-02, 6.6296e-02, 2.2838e-01,
-1.0213e-01, 7.5883e-02, -1.7531e-01,
-1.4869e-01, 1.0736e-01, 1.4129e-01,
-2.8235e-02, -2.9232e-02, -9.3912e-02,
5.1317e-02, 9.0256e-02, -2.4669e-02,
-3.2465e-02, 5.8099e-02, 9.8402e-02,
-2.3135e-01, -1.3786e-01, 2.8581e-01,
-3.2410e-01, -2.6623e-01, 6.1583e-02,
1.8696e-01, 4.7251e-02, -2.3520e-01,
2.5630e-02, -1.2358e-01, -1.5735e-01,
-1.2198e-01, 5.1970e-01, 1.9976e-01,
-1.2515e-01, 9.8768e-02, 5.8917e-02,
-3.8569e-02, -9.2729e-02, -1.8982e-01,
1.1378e-01, 5.7195e-01, -1.8265e-01,
-3.5724e-02, -2.1379e-01, -2.2129e-01,
-5.1198e-40, -3.4709e-40, 6.2940e-40,
-2.2134e-41, -3.6133e-40, -2.7075e-40,
-5.9664e-40, -2.3937e-40, 3.0876e-40,
9.1814e-41, 9.5898e-41, -3.1892e-40,
3.1093e-40, 2.7935e-40, 1.7966e-40,
-2.3967e-40, 4.0806e-40, 6.2012e-40,
5.3771e-41, 6.1000e-40, -4.6695e-40,
5.9474e-41, -4.9675e-40, 5.7403e-41,
4.7091e-40, -5.0751e-41, 3.9864e-41,
-9.7756e-41, 2.7978e-40, -5.0791e-40,
-3.4321e-40, -7.0774e-41, -5.2651e-40,
2.8034e-40, -3.3452e-40, 1.9535e-40,
-6.2300e-40, -1.8372e-40, -1.9038e-40,
-5.6564e-40, -6.1257e-40, -1.0338e-40,
-1.7191e-41, -1.2843e-41, 5.0707e-40,
-4.4587e-40, 2.7128e-40, -1.4155e-40,
-5.7475e-40, -3.4612e-40, -4.7424e-40,
1.7235e-40, -6.0028e-40, -1.6342e-40,
-5.1072e-40, -2.4721e-40, -2.8477e-41,
2.6598e-40, -4.4078e-40, 4.1763e-40,
-3.3947e-40, -5.5626e-40, 4.9713e-40,
2.1733e-40, -2.9024e-40, -4.5514e-42,
-3.4873e-40, -1.0737e-40, -1.4297e-40,
2.8514e-40, 2.6283e-40, 2.2827e-40,
3.8908e-40, -4.2140e-40, 6.1433e-40,
-4.7825e-40, -3.0140e-40, -5.9563e-40,
1.5280e-40, 2.6156e-40, 5.0361e-40,
1.9497e-01, 2.3140e-01, -3.5244e-02,
1.6876e-01, -1.7646e-02, -2.0413e-01,
9.8052e-02, -6.7906e-02, -3.9834e-02,
-5.9252e-15, -6.7431e-15, -8.1865e-15,
-5.7350e-15, -6.6893e-15, -8.9833e-15,
-8.4106e-15, -1.0631e-14, -1.5948e-14,
8.9389e-02, 6.6460e-02, 6.8477e-02,
6.1099e-03, -8.7536e-02, 1.1792e-01,
-1.0079e-01, 1.5293e-01, 4.3945e-02,
1.0168e-01, 1.0281e-01, -7.9173e-02,
2.0855e-01, 1.7537e-01, -7.1000e-02,
-1.4157e-01, -3.8478e-02, -2.7478e-01,
2.2156e-01, -6.4262e-02, -7.2841e-02,
-3.2334e-01, 6.5591e-02, 1.1163e-01,
7.2151e-02, -1.6943e-01, 5.9049e-02,
-1.4813e-01, -2.0904e-01, -8.8010e-02,
-2.7215e-01, 5.7668e-01, 1.7618e-02,
-7.1365e-02, 1.2976e-01, -1.0169e-01,
-8.9229e-02, 3.3971e-02, 1.8295e-01,
1.7204e-01, 3.8082e-01, 3.7415e-02,
5.9309e-02, -4.9550e-04, 5.1555e-01,
-5.1006e-18, -5.6038e-18, -5.8724e-18,
-5.8910e-18, -5.8379e-18, -5.6311e-18,
-5.2596e-18, -5.1835e-18, -4.6300e-18,
6.4067e-02, 1.8889e-02, -1.0634e-01,
1.7316e-04, 1.9935e-01, -1.1854e-02,
-9.3669e-02, -1.1924e-01, -1.8981e-02,
1.7465e-08, 1.7340e-08, 1.7565e-08,
1.8234e-08, 1.8008e-08, 1.8017e-08,
1.9226e-08, 1.8956e-08, 1.8651e-08,
-1.7294e-01, -1.2200e-01, -4.9577e-02,
-3.5087e-02, -1.2526e-01, 9.3445e-03,
-7.4374e-02, -1.1350e-01, 2.7510e-03,
8.5153e-02, 4.2080e-02, -5.0111e-02,
1.2845e-01, 1.9630e-01, 1.0542e-01,
-1.0095e-01, 6.2631e-02, 8.8734e-02,
3.4836e-01, 5.4389e-01, -2.2360e-01,
5.1721e-01, 5.7094e-01, -6.7491e-02,
-3.5972e-02, 1.0590e-01, -2.2984e-01,
-1.5483e-01, -5.1271e-03, 4.9780e-02,
-1.3184e-01, 2.8028e-01, -1.1427e-02,
-3.4093e-02, -6.7622e-02, -1.2359e-02,
1.3184e-02, 1.2125e-01, -1.2502e-02,
9.2730e-02, -6.5974e-02, -1.6519e-01,
1.9546e-01, -1.5188e-01, -8.1752e-02
}
,
{
-3.4905e-04, -3.5739e-04, -3.2920e-04,
-3.8506e-04, -3.9121e-04, -3.5635e-04,
-3.7303e-04, -3.7698e-04, -3.4190e-04,
2.8622e-41, -1.2033e-41, 1.2609e-40,
-4.9379e-40, -5.1047e-40, 5.5085e-41,
-4.7002e-40, -5.0136e-40, -4.5629e-40,
-5.1095e-40, 1.8741e-40, 1.8435e-40,
4.1851e-40, -8.9558e-41, -9.6681e-41,
-1.8244e-40, 2.7992e-40, 1.8116e-40,
2.8655e-40, -3.0193e-40, 2.2293e-40,
1.6805e-40, 3.3049e-40, 6.9542e-41,
-3.3329e-40, 4.2212e-40, -1.3453e-40,
-8.4502e-15, -1.1099e-14, -9.4174e-15,
-9.8778e-15, -1.1768e-14, -9.4875e-15,
-6.7805e-15, -7.4561e-15, -5.8023e-15,
6.0452e-40, 6.9262e-41, 2.9300e-40,
-6.1511e-40, -4.1269e-40, 4.4012e-40,
1.3340e-42, -2.9020e-40, -4.5529e-40,
-1.2289e-22, -1.3972e-21, -5.5694e-21,
-1.7854e-21, -1.7743e-20, -5.6749e-20,
-6.8510e-21, -6.2353e-20, -1.6203e-19,
-5.0003e-07, -5.1950e-07, -4.7654e-07,
-5.5510e-07, -5.7995e-07, -5.2753e-07,
-5.3262e-07, -5.5802e-07, -5.0971e-07,
-1.4922e-02, -1.1926e-01, -1.9067e-02,
-2.6298e-03, 2.1756e-01, 3.0148e-02,
1.4372e-01, 3.5066e-02, -1.0184e-02,
-4.1698e-12, -4.8798e-12, -6.4033e-12,
-2.3169e-12, -2.7879e-12, -3.7276e-12,
-1.6177e-12, -2.0021e-12, -2.6440e-12,
-5.9514e-40, -4.4339e-40, -3.0315e-40,
3.5756e-40, 2.5390e-40, -1.2253e-40,
2.1417e-40, 4.0569e-40, 5.3962e-40,
-5.5825e-13, -6.8528e-13, -9.3486e-13,
-2.9163e-13, -3.6959e-13, -5.1183e-13,
-1.8703e-13, -2.4740e-13, -3.4019e-13,
-2.7137e-01, -4.5025e-01, 2.6405e-02,
-7.9580e-02, 5.0698e-01, -7.8794e-02,
-3.7540e-02, -7.1115e-03, -3.9741e-01,
-5.9910e-40, -5.5101e-40, 3.1274e-41,
-6.9384e-41, -4.9294e-40, -1.0818e-40,
-3.5484e-40, -4.7965e-41, -5.2508e-41,
4.1917e-01, -1.6207e-02, -6.8506e-02,
-2.7060e-02, 5.6162e-01, 1.6696e-01,
-1.7677e-03, 1.8842e-01, -6.0493e-02,
-3.0696e-01, -1.7293e-01, -8.7143e-02,
-1.6740e-01, 1.8861e-02, -1.7112e-01,
8.6594e-02, 3.0025e-01, -7.6141e-02,
1.1317e-02, 1.0678e-01, -5.1283e-02,
-1.2872e-01, 4.2580e-01, 4.9678e-02,
-2.8372e-01, -1.3479e-01, -7.3813e-02,
-1.7038e-15, -1.1156e-15, -7.3385e-16,
-2.6350e-15, -1.6234e-15, -1.0598e-15,
-7.7860e-15, -4.6981e-15, -3.0030e-15,
-3.0246e-40, -4.1596e-40, 2.9013e-40,
8.5195e-41, -2.2396e-40, -2.0322e-40,
-5.6200e-40, 2.4820e-40, 3.1309e-40,
-3.1822e-17, -1.6585e-17, -8.8616e-18,
-5.9907e-17, -2.9812e-17, -1.6126e-17,
-2.4410e-16, -1.2541e-16, -6.7867e-17,
1.5795e-01, -1.4429e-01, -6.0501e-02,
5.9113e-02, 3.4391e-01, 1.4165e-01,
5.2564e-02, -1.8209e-01, -6.8176e-02,
-7.7363e-41, 5.9969e-40, 5.9290e-40,
-7.4888e-41, -7.0945e-41, 5.3120e-40,
1.3612e-40, -4.6718e-40, -1.0677e-40,
-1.1498e-01, -1.2925e-02, 2.6735e-02,
-8.1469e-02, 2.9678e-01, 1.8971e-01,
2.0149e-02, 2.4207e-03, -1.2549e-01,
-6.6799e-02, -3.5900e-02, -5.6111e-02,
9.5181e-02, 2.1216e-02, 2.0477e-01,
8.5923e-03, 6.8615e-03, 3.8252e-02,
4.5098e-03, 2.1321e-01, 3.4612e-03,
3.5662e-01, 4.7532e-02, 2.5319e-01,
4.1275e-02, 1.7951e-01, 3.2239e-02,
-2.6628e-21, -7.7165e-22, -4.9086e-22,
-1.4320e-21, -2.7134e-22, -1.2712e-22,
-1.9648e-21, -3.4172e-22, -1.3895e-22,
-2.2836e-40, 3.2091e-40, -4.4396e-40,
2.9048e-40, 6.0866e-40, 3.7804e-40,
-3.0676e-40, -2.4897e-40, 4.9891e-40,
-1.8955e-28, -3.4994e-29, -1.2914e-29,
-4.7737e-29, -3.5212e-30, -6.4003e-31,
-8.2908e-29, -3.1692e-30, -3.6909e-31,
-9.3327e-02, 1.5314e-01, 1.0676e-01,
2.5979e-01, -6.6826e-01, 2.3727e-01,
1.4855e-01, 1.9205e-01, 8.8246e-02,
-5.5197e-40, 5.3162e-41, -5.2933e-40,
1.0846e-41, -5.8128e-40, -3.1273e-40,
-2.8408e-40, 1.6989e-40, 4.8221e-41,
7.8403e-02, 1.6407e-01, 7.9932e-02,
3.2253e-01, -2.6036e-01, -8.9727e-02,
-7.5145e-02, 1.5536e-02, -8.2710e-02,
-2.1608e-01, -4.4619e-01, -4.4470e-02,
-3.9430e-01, -8.2373e-01, -7.0646e-01,
-6.9004e-03, -4.9697e-01, -1.4212e-01,
-1.8932e-06, -1.8356e-06, -1.6373e-06,
-1.9427e-06, -1.9113e-06, -1.7028e-06,
-1.8843e-06, -1.8616e-06, -1.6818e-06,
-4.7452e-29, -4.4894e-29, -2.5364e-29,
-5.6268e-29, -5.4363e-29, -3.0876e-29,
-4.3808e-29, -4.2767e-29, -2.4573e-29,
3.8855e-40, 3.5152e-40, -4.8707e-40,
4.3606e-41, -1.7886e-40, 5.1970e-40,
6.2864e-40, 5.9972e-40, 2.2197e-40,
-2.1903e-37, -1.9174e-37, -7.0785e-38,
-2.7149e-37, -2.4810e-37, -9.5619e-38,
-1.8463e-37, -1.7136e-37, -6.7163e-38,
-2.9062e-30, -3.1324e-30, -1.0876e-30,
-2.7434e-30, -3.7036e-30, -1.2821e-30,
-6.8828e-31, -9.8708e-31, -3.7930e-31,
-6.3329e-41, -3.8604e-41, -2.8272e-40,
-3.3350e-40, -1.5210e-40, -4.2620e-41,
-1.7669e-41, 5.2291e-40, -3.3205e-40,
-3.0738e-25, -8.2305e-24, -2.1451e-23,
-1.4470e-24, -4.5131e-23, -1.2177e-22,
-4.2841e-24, -1.3077e-22, -3.5946e-22,
-8.5637e-08, -8.4715e-08, -7.7597e-08,
-8.7326e-08, -8.7480e-08, -8.0290e-08,
-8.4525e-08, -8.4963e-08, -7.8582e-08,
-5.8581e-27, -8.8483e-27, -8.1150e-27,
-7.4336e-27, -1.2036e-26, -1.1909e-26,
-6.6006e-27, -1.0685e-26, -1.0809e-26,
-5.6355e-40, -2.3469e-40, -3.5885e-40,
-2.0755e-40, 2.0377e-40, 3.2259e-40,
-5.3947e-40, 4.2747e-41, 4.8967e-41,
4.5073e-41, 5.0069e-40, 2.6114e-40,
-4.8225e-40, -4.8317e-40, -5.4316e-40,
-5.4335e-40, -5.2994e-40, 2.6295e-40,
-1.1702e-40, -2.3137e-41, -4.5405e-40,
-4.6797e-40, 6.5582e-41, 1.8111e-40,
6.1477e-40, -1.6827e-40, -2.0288e-40,
-2.4220e-41, 4.7774e-40, 5.1050e-40,
4.9844e-40, 5.6437e-41, 4.7749e-40,
-6.8037e-41, -5.5944e-41, -5.2248e-40,
-2.9382e-40, 2.3800e-41, 1.5850e-40,
-4.5290e-40, -5.2260e-41, 2.3726e-40,
-1.9232e-40, -2.3502e-40, -2.9736e-40,
-2.8081e-40, -5.2929e-40, -4.0786e-40,
-3.0303e-41, 3.1336e-40, -5.8450e-40,
-1.5091e-40, -2.7371e-40, -4.5927e-40,
-4.0985e-38, -6.9102e-38, -5.4450e-38,
-6.2744e-38, -1.1526e-37, -9.9374e-38,
-4.8587e-38, -9.1819e-38, -8.0593e-38,
-2.9266e-29, -4.5005e-29, -3.9891e-29,
-3.8505e-29, -6.3370e-29, -6.0017e-29,
-3.2761e-29, -5.4145e-29, -5.1812e-29,
3.3692e-40, 1.0044e-40, -6.6821e-41,
9.2910e-41, 6.2137e-40, -3.5625e-40,
1.8601e-40, 3.1653e-40, -1.1506e-40,
1.2093e-40, -5.7191e-40, 5.6828e-40,
-2.3177e-40, -2.1648e-40, 5.3642e-40,
4.8826e-40, 5.2760e-40, -4.9059e-40,
-2.0721e-40, 2.0122e-40, -5.9485e-40,
3.8843e-40, -6.0861e-41, -4.0542e-40,
-3.4308e-40, -4.2822e-40, -3.9605e-40,
-5.7429e-40, 4.9242e-40, -5.9141e-40,
4.6267e-40, -2.4953e-40, -2.9300e-40,
5.3466e-40, -5.2403e-40, 3.5178e-40,
-1.8309e-40, 2.9157e-40, -7.7367e-41,
-5.8922e-40, 3.2359e-40, -6.1293e-40,
6.1138e-40, 2.2121e-40, -5.0657e-42,
4.7910e-40, -1.4080e-40, 1.9220e-40,
-3.5670e-40, 3.4204e-40, -5.0215e-40,
1.1877e-41, 2.3114e-40, -4.7794e-40,
-3.6520e-40, 4.3222e-40, -5.2866e-40,
-6.0703e-40, -4.0896e-40, -1.2521e-40,
-4.1981e-40, 5.4404e-41, 3.3337e-40,
1.3733e-01, 1.8485e-01, 7.6179e-02,
8.1719e-02, 3.3343e-01, 2.9857e-02,
-4.2753e-03, 2.0957e-01, 1.8582e-02,
2.9948e-07, 3.3403e-07, 3.7619e-07,
3.4854e-07, 3.8224e-07, 4.1507e-07,
3.7511e-07, 4.0398e-07, 4.3743e-07,
-1.7150e-41, -2.4088e-41, -1.5593e-40,
6.3817e-41, 4.8004e-41, -1.1053e-40,
-2.5225e-40, -2.7111e-40, -4.2970e-40,
1.0496e-06, 1.0916e-06, 1.1376e-06,
1.1364e-06, 1.1756e-06, 1.2051e-06,
1.1762e-06, 1.2105e-06, 1.2358e-06,
1.0037e-02, 1.4957e-01, -4.9010e-02,
2.6877e-02, 1.9067e-01, -1.9339e-03,
-2.2081e-02, -1.5137e-01, -1.6088e-01,
1.6880e-41, -2.0352e-41, -4.1857e-42,
2.0926e-40, -2.1394e-41, -5.4341e-40,
4.6824e-40, 6.2682e-40, 4.9865e-40,
-3.2967e-01, -2.5981e-01, -1.3016e-01,
-2.6507e-01, 3.2282e-01, 4.3204e-01,
-7.0936e-02, 1.9800e-01, 9.4916e-02,
-1.0122e-02, 7.4127e-02, -7.1554e-02,
7.7869e-02, 1.5734e-01, 1.3287e-01,
-9.5431e-02, 1.0984e-01, -7.6759e-02
}
,
{
-5.5262e-40, 3.7699e-40, -1.4920e-40,
4.0064e-40, -2.0632e-40, -4.4801e-41,
-3.6749e-40, 5.9043e-40, -1.5942e-40,
-5.9219e-42, -4.1286e-40, -1.6920e-40,
-2.5927e-40, -4.5458e-41, 2.0990e-40,
-4.6860e-40, 5.0483e-40, 2.8004e-40,
-4.0641e-40, 6.0770e-40, -3.8297e-42,
5.7537e-40, 5.7772e-40, -1.0048e-40,
1.5945e-40, 3.9582e-40, -2.6190e-40,
-5.1046e-40, -5.5028e-40, 5.8786e-40,
-3.5033e-40, -1.2031e-40, -3.4156e-40,
3.0058e-40, 4.3043e-40, 5.9825e-40,
4.9197e-40, 2.5974e-40, -4.3461e-41,
-4.1935e-40, -1.6383e-41, -1.4680e-40,
-5.3501e-40, -2.6348e-40, 3.0631e-40,
-5.2019e-40, -4.4123e-40, 2.3984e-40,
-4.4682e-41, -4.6000e-40, -5.0418e-40,
-4.1263e-40, 4.5391e-40, 2.8844e-40,
5.2179e-40, -1.3188e-40, 5.1600e-40,
-2.2913e-40, -3.1127e-40, 5.4478e-40,
2.3395e-41, 5.4758e-40, 2.0998e-40,
-1.9914e-10, -2.0700e-10, -1.9815e-10,
-2.1098e-10, -2.1989e-10, -2.1131e-10,
-2.0797e-10, -2.1693e-10, -2.0860e-10,
-2.1061e-40, -2.1208e-40, -3.3698e-40,
3.2370e-40, 2.9276e-40, -3.6860e-40,
3.4752e-40, -2.0660e-40, -3.8183e-40,
-8.0136e-02, 1.3809e-02, 1.6846e-03,
3.7960e-02, 8.7557e-02, -3.5498e-01,
9.8165e-03, 9.8384e-02, 1.2395e-01,
-2.8751e-02, 9.9172e-02, 5.5841e-02,
-4.0383e-02, 1.0856e-01, -5.4339e-01,
1.3245e-02, -4.7642e-02, -1.0427e-01,
-7.4696e-03, 5.0806e-02, -1.7179e-01,
5.0303e-02, -4.0322e-01, 7.4760e-01,
-9.2342e-02, 1.1958e-01, -1.8871e-01,
3.7044e-40, -4.6951e-40, -1.9873e-40,
5.3289e-41, 2.7689e-40, -4.6994e-41,
-3.1404e-40, -5.9106e-40, 6.0436e-40,
-6.0294e-40, -3.6565e-40, -1.1884e-40,
5.5933e-40, -9.5741e-41, 4.4736e-40,
4.3267e-40, -4.9583e-40, 3.4437e-40,
-1.7432e-40, 1.4518e-40, 2.1033e-40,
-3.4667e-40, 1.7222e-40, -2.5651e-40,
-5.2517e-40, 2.8983e-41, -1.3832e-40,
-1.4153e-01, 9.4023e-02, -9.8526e-02,
2.0678e-01, 4.0842e-01, -1.1853e-01,
-1.4108e-01, -1.1005e-01, -8.1274e-02,
3.4336e-41, 1.5625e-40, 2.7213e-40,
-5.3447e-40, -3.7330e-40, -3.3637e-40,
-4.3563e-40, -3.7094e-40, 1.2820e-41,
-8.1700e-02, -1.8215e-01, -1.6011e-01,
-1.4203e-01, 5.3791e-02, -3.7663e-02,
-1.1705e-01, -1.2604e-01, -8.4890e-03,
-6.1578e-02, -3.3907e-01, 2.2344e-03,
1.5060e-01, -1.9199e-01, -5.5274e-02,
6.2300e-02, 9.1084e-02, 1.3788e-02,
4.9025e-02, 3.3738e-01, -1.8104e-01,
-2.5051e-01, 8.2363e-02, 2.0325e-01,
5.6988e-02, -1.5118e-01, 6.8897e-02,
-4.6233e-40, 1.2244e-40, -3.9802e-40,
5.8530e-40, -2.4162e-40, 4.6793e-40,
-4.8362e-40, 3.3071e-40, 1.7094e-40,
3.5249e-40, -4.8579e-40, 1.9374e-40,
6.2372e-42, 5.8402e-41, 3.2851e-40,
6.1488e-40, 1.8086e-40, -5.2451e-40,
-3.0723e-40, -5.6704e-40, -5.9899e-40,
-3.5975e-40, -1.3818e-40, -2.7285e-40,
2.4468e-40, 8.3606e-41, 1.8818e-40,
-2.3749e-01, -2.7008e-01, -1.5222e-03,
1.4806e-01, 9.0783e-02, 2.7170e-02,
1.8706e-01, 1.8162e-01, -1.1799e-01,
-1.9852e-40, -4.8879e-40, -3.1971e-40,
-1.0245e-40, 9.1421e-41, 5.3018e-40,
2.2240e-40, -1.4666e-40, -4.4259e-40,
1.1835e-01, -2.7624e-01, 1.1446e-01,
1.3574e-01, 4.3109e-01, 1.3227e-01,
3.2554e-02, 1.7139e-01, -1.1988e-01,
3.5376e-02, 8.9191e-02, 6.7643e-02,
-8.2716e-02, 2.4178e-01, 6.0818e-02,
-6.7722e-02, -3.3712e-02, 3.0664e-02,
-6.6948e-02, 2.2886e-01, 1.8143e-01,
1.8636e-01, -2.4800e-01, 1.7185e-01,
-6.5479e-03, 1.8828e-01, -7.4464e-02,
-2.8281e-30, -5.8969e-31, -2.3180e-31,
-1.6163e-30, -3.8426e-31, -1.6788e-31,
-1.9412e-30, -4.1995e-31, -1.7651e-31,
-2.0525e-40, 4.6680e-40, 5.9108e-41,
1.0336e-40, -5.7226e-41, -6.1906e-40,
-1.8693e-40, 5.5777e-40, 6.0898e-40,
-3.4735e-41, -3.2674e-40, -2.3864e-41,
-3.3596e-40, 3.3107e-40, 1.0843e-40,
5.1103e-40, 6.0598e-40, -3.6267e-40,
-4.5583e-03, -1.0635e-01, -7.4962e-02,
-1.2741e-01, 2.7234e-01, 1.0508e-01,
-2.1207e-01, 9.6720e-02, 3.4641e-02,
1.1304e-12, 1.1614e-12, 9.7086e-13,
1.3361e-12, 1.3697e-12, 1.1286e-12,
1.2620e-12, 1.2938e-12, 1.0680e-12,
-8.4197e-02, 6.3834e-02, 2.3157e-02,
-2.1280e-02, 2.9074e-01, 8.5883e-02,
-1.3695e-01, -1.6047e-01, -4.5834e-02,
-1.3848e-01, -6.6090e-02, -7.7201e-02,
-5.1963e-02, 6.0643e-02, -4.9932e-02,
1.1779e-01, 1.7521e-01, 3.0366e-02,
4.7601e-03, 4.3941e-02, -3.5985e-02,
1.7692e-02, -2.3705e-01, 2.1062e-01,
7.7174e-02, -7.6616e-02, 2.0102e-02,
-3.6353e-06, -3.5534e-06, -3.2461e-06,
-3.6813e-06, -3.6196e-06, -3.3222e-06,
-3.5581e-06, -3.5179e-06, -3.2504e-06,
-7.3892e-11, -7.2930e-11, -6.8104e-11,
-7.9244e-11, -7.7770e-11, -7.2319e-11,
-7.7297e-11, -7.5673e-11, -7.0195e-11,
-1.5180e-10, -1.5027e-10, -1.4244e-10,
-1.6013e-10, -1.5761e-10, -1.4940e-10,
-1.5682e-10, -1.5395e-10, -1.4553e-10,
-9.1167e-02, 1.2374e-01, -3.8304e-02,
2.2641e-01, 2.4855e-01, -4.3174e-02,
1.4364e-01, 1.8438e-01, 1.1617e-02,
6.1925e-40, 3.3333e-40, 1.8962e-40,
3.2481e-40, -1.7566e-40, -3.0456e-40,
2.7654e-40, 3.8422e-41, 4.9191e-40,
7.5657e-02, -1.0697e-03, 3.0319e-02,
-4.7642e-02, -9.4454e-02, -2.6543e-02,
-5.3129e-02, -1.9667e-01, -1.0851e-01,
-8.5909e-03, 1.2177e-01, 2.6434e-01,
2.4468e-02, 5.0484e-02, 3.4698e-01,
-1.4764e-03, 3.7374e-02, 1.2658e-01,
2.0602e-02, -2.4624e-02, 1.3741e-01,
1.8641e-02, 4.0484e-01, 3.2976e-01,
-4.4809e-01, -3.2104e-03, 1.6290e-03,
8.1306e-41, 2.0311e-40, 2.9683e-40,
-5.7636e-40, 4.4291e-40, 4.3356e-40,
-7.1797e-41, 4.5366e-40, 3.9953e-40,
-4.5418e-40, 4.1805e-40, -3.2458e-41,
-9.4881e-41, -8.6365e-41, -1.9294e-40,
7.1954e-41, -9.8565e-41, -5.5540e-40,
-5.3769e-40, 1.4094e-40, -1.5355e-40,
8.8038e-41, -3.6848e-40, -1.2237e-40,
-2.8267e-41, -1.7583e-40, -5.9647e-40,
1.0929e-01, 2.9895e-02, -1.4923e-01,
-1.1234e-01, -1.0514e-01, -1.3280e-02,
2.2255e-01, 6.4152e-03, -1.6309e-02,
-1.5899e-40, -7.2549e-41, -2.6734e-40,
-3.3842e-40, 3.3255e-40, 4.2694e-40,
5.2940e-40, 3.2455e-40, -3.7081e-40,
6.3639e-02, -3.3720e-02, -2.3453e-02,
1.9477e-01, 5.2267e-02, 1.8565e-02,
1.6048e-01, 2.7636e-01, 1.5930e-02,
1.7673e-03, 6.3646e-02, -1.5127e-02,
-3.7787e-02, -1.4037e-01, -3.6231e-02,
-1.5636e-02, -7.8742e-02, -2.4137e-02,
-5.0748e-02, 6.5641e-02, -2.5353e-03,
8.4955e-02, 7.4231e-01, 1.3795e-01,
-1.4552e-01, 2.0869e-01, 4.0739e-02,
-2.0015e-41, 5.2988e-40, 2.7578e-40,
4.1051e-40, 1.2834e-40, -3.4898e-40,
-1.1975e-40, 4.2374e-40, -3.0404e-41,
-6.3014e-40, 4.6330e-40, -4.4141e-41,
2.5442e-41, 5.7456e-40, 2.3848e-40,
-1.0788e-40, -5.0563e-40, -5.3638e-41,
3.5728e-40, 1.9752e-40, 6.1004e-40,
2.8189e-41, -6.2151e-40, 1.1807e-41,
6.5305e-41, 5.2028e-40, 1.3692e-40,
6.4391e-02, -1.3079e-01, -3.7980e-02,
-3.2362e-01, -3.7239e-01, -8.0182e-02,
-2.6787e-01, -3.1240e-01, -1.2798e-02,
-1.2072e-40, 5.3996e-40, -3.4352e-40,
-8.0996e-41, -3.0208e-40, 3.1848e-40,
-5.6407e-40, 2.4674e-41, -2.1055e-40,
-9.2897e-02, 1.8040e-01, -4.3269e-01,
-7.6669e-02, 4.3554e-01, -4.4870e-02,
-2.3249e-02, -1.1805e-01, 1.0507e-01,
-5.2540e-02, -3.6856e-01, 1.1246e-01,
-2.3632e-02, 1.3165e-01, -1.5380e-02,
-1.1467e-02, -5.3754e-02, -4.1619e-02,
-1.5635e-01, 3.8584e-01, -1.4434e-01,
1.7523e-01, 3.7253e-02, 4.9784e-01,
5.8484e-02, -8.4711e-02, -7.7498e-02,
-1.6956e-40, 5.4293e-41, -2.5140e-40,
-3.1995e-40, -4.8337e-40, 2.5539e-40,
-1.1449e-40, 1.9503e-40, -1.7368e-40,
5.4753e-40, 5.9720e-40, -4.7821e-40,
3.8830e-40, -3.1984e-40, -2.7163e-40,
-5.3411e-40, 7.2638e-41, 4.3186e-40,
4.6654e-40, -5.9540e-40, -2.8155e-40,
-1.4801e-40, -1.6945e-40, 1.9723e-40,
5.8380e-40, -6.1587e-40, 3.3667e-40,
-2.9327e-02, -4.2746e-02, -1.5018e-01,
8.6354e-02, 2.8140e-01, 1.2970e-02,
-2.0755e-01, 6.7548e-02, -3.6049e-02
}
,
{
9.5728e-41, 5.3991e-40, -1.3764e-40,
-2.0389e-40, 2.4254e-40, 3.3492e-40,
6.5289e-41, -3.0842e-40, 5.5850e-40,
7.7599e-02, 2.5043e-02, -1.4099e-02,
-3.3184e-02, 5.6863e-01, -2.7001e-02,
-5.2659e-02, 5.4713e-02, 2.3991e-03,
2.2010e-02, -3.9120e-02, -1.1558e-01,
9.1633e-02, 1.3070e-01, 1.2489e-01,
-4.4040e-02, -1.6324e-02, -4.9631e-02,
-7.3548e-02, -2.0492e-01, 1.4043e-01,
-6.0411e-02, 5.7710e-02, -3.6840e-02,
1.3173e-02, 2.3215e-03, 1.1820e-02,
2.5772e-02, -1.3436e-01, -5.9285e-02,
-9.3983e-02, 1.1545e-01, 1.1602e-01,
-1.8505e-02, 6.1498e-02, -1.3097e-02,
9.8690e-03, -2.1338e-02, -1.2175e-01,
1.7936e-02, -2.7811e-02, 6.7037e-02,
-5.1401e-03, 7.6421e-02, -1.0794e-01,
4.6409e-02, 3.4701e-01, 2.6587e-02,
8.4175e-02, 5.2712e-01, 6.8999e-02,
-8.0756e-02, 1.9648e-01, -8.4639e-02,
1.2818e-01, 4.0660e-02, 7.6715e-02,
8.7991e-02, 4.6556e-01, -4.0025e-02,
2.1251e-03, -8.3784e-03, 5.9859e-02,
1.9835e-40, -3.4675e-40, -7.9692e-41,
-1.4304e-40, 2.3927e-40, -5.9796e-40,
3.8209e-40, -6.3260e-41, -9.2501e-41,
3.2007e-01, 1.5800e-01, -1.9594e-02,
-4.5315e-02, 1.0536e-01, -8.0692e-02,
2.1185e-01, -3.1418e-01, -1.5257e-01,
8.6294e-02, -1.3398e-01, -1.0694e-01,
8.6084e-02, -1.2393e-03, 1.7549e-02,
-1.5504e-01, -1.3112e-01, -3.5905e-02,
-3.8190e-01, 3.8393e-01, 1.6587e-02,
1.5002e-01, 1.9586e-01, -2.6260e-01,
-4.0159e-02, -8.2891e-02, -1.7761e-01,
-1.8611e-01, -1.1241e-02, -4.2538e-02,
-5.7898e-02, 2.4583e-01, 4.1590e-02,
2.4890e-02, 7.9409e-03, -2.7418e-02,
6.6194e-03, -4.2441e-02, -1.1167e-01,
-1.3236e-01, -7.9642e-02, -6.0623e-02,
-4.7198e-03, 5.6904e-02, 1.2651e-01,
1.2925e-01, -5.9162e-02, -9.1949e-04,
1.8668e-02, -2.6361e-02, -7.1042e-03,
-4.3178e-02, 2.6050e-04, 4.4799e-02,
7.9674e-02, 2.7656e-02, 7.1211e-03,
1.1463e-01, 1.0765e-01, 7.6066e-02,
-8.0780e-02, -5.4875e-02, 1.5209e-02,
-3.7365e-13, -3.7819e-13, -3.5929e-13,
-4.0298e-13, -4.0881e-13, -3.9033e-13,
-3.9409e-13, -3.9950e-13, -3.8277e-13,
-1.7847e-02, -1.7537e-02, -3.7313e-03,
2.6531e-02, 7.5951e-02, -4.0134e-03,
1.7387e-02, 6.0044e-02, -9.0211e-02,
2.7091e-02, 8.8333e-02, 1.0619e-01,
5.0470e-02, 1.2406e-02, 1.5503e-01,
-1.5936e-02, -2.2422e-01, -2.4640e-02,
-8.2430e-03, -1.4097e-02, -6.2474e-02,
8.0534e-02, 1.8603e-01, -3.1725e-02,
-3.1621e-03, 2.0362e-03, -1.4002e-01,
-7.3799e-03, 1.5881e-01, 6.7195e-02,
4.5946e-02, 2.4358e-01, 1.4677e-01,
-7.4788e-02, 6.7297e-02, 9.0735e-02,
-8.4553e-03, -1.1877e-02, 4.4209e-02,
-1.4281e-02, -6.8849e-02, -4.1386e-03,
3.2286e-02, 4.7128e-02, -1.2988e-02,
-2.2990e-02, -8.9265e-02, 6.4050e-02,
-2.3354e-02, 1.3846e-01, -1.6256e-01,
-6.5661e-02, -2.8983e-02, -4.3497e-02,
1.0597e-02, -2.3534e-02, -2.6068e-02,
-7.8812e-02, 1.9502e-01, 6.8938e-03,
3.2025e-02, 2.3353e-02, 4.9225e-02,
-5.0273e-40, 1.2403e-41, 5.8127e-40,
3.2777e-40, -3.5740e-40, 4.9781e-40,
-2.4198e-40, -4.6311e-40, 1.3330e-40,
-3.0803e-01, 1.7804e-01, 1.0604e-01,
4.1405e-01, 1.9740e-01, -5.3067e-02,
2.3738e-01, -1.6828e-01, 1.5338e-01,
6.6857e-03, 1.8623e-01, -1.2126e-01,
-1.6323e-01, -1.2719e-02, -1.7743e-01,
-1.3612e-01, -3.4442e-02, -1.0552e-01,
-1.4560e-01, 1.8771e-01, 8.4508e-02,
5.8732e-02, -2.2378e-01, 1.2673e-01,
3.0455e-03, 3.8438e-02, -6.2235e-02,
1.9951e-02, 2.6963e-01, -1.8594e-01,
-8.6550e-02, -1.3097e-01, -3.5032e-02,
2.0423e-02, -9.0499e-02, 1.7130e-01,
-1.8592e-01, 6.6808e-02, -1.5768e-01,
-6.4402e-02, -1.2265e-01, 6.8487e-02,
1.9899e-02, 9.3376e-02, 7.8577e-02,
-1.3384e-01, -7.6429e-02, 1.7142e-02,
-1.2385e-01, -1.1821e-01, -1.2716e-03,
5.3770e-02, 1.4973e-01, 1.4762e-01,
-4.7688e-02, -1.1733e-01, -1.5032e-01,
-2.0699e-01, -9.4949e-02, -2.6374e-02,
4.4489e-02, 1.8376e-02, -7.6844e-02,
1.8831e-40, -2.6056e-40, -4.7602e-40,
-3.4079e-40, 1.5054e-40, 1.2387e-40,
2.3040e-40, 1.4644e-40, 5.6365e-40,
-2.0809e-02, 5.3674e-03, 1.7057e-03,
2.4160e-01, 4.1348e-01, 3.5215e-02,
8.2154e-02, 2.0431e-01, 1.0366e-01,
-1.5149e-02, 1.0521e-01, -4.1706e-02,
-5.0651e-02, 2.3615e-02, -9.3860e-02,
-1.0823e-01, -6.3645e-02, -1.1573e-01,
-2.4116e-02, 1.3546e-02, -1.0298e-03,
1.2102e-02, 2.2630e-02, 1.1375e-01,
1.3966e-02, 1.0754e-01, 1.6621e-01,
1.6213e-02, 2.0816e-01, 8.9441e-02,
-7.5452e-02, 3.4580e-03, -3.3317e-01,
5.0917e-02, 1.3898e-01, -1.0723e-01,
6.0473e-03, 8.9741e-02, -6.8206e-02,
-7.1770e-02, -3.5661e-01, -2.8935e-01,
-1.6324e-02, 2.5728e-02, -1.1281e-02,
-1.3390e-01, -9.3090e-02, 4.3366e-02,
4.8620e-02, 1.4917e-01, 1.6295e-01,
2.4123e-03, -7.6347e-02, -8.0226e-02,
6.0740e-03, 3.7065e-02, 4.5518e-04,
-1.3793e-01, 2.3848e-01, -1.1199e-01,
1.0422e-01, 1.1214e-01, 3.3457e-02,
-3.2827e-40, 5.9135e-40, 3.3773e-40,
-5.8903e-40, -5.9439e-41, 1.9973e-40,
-3.6141e-40, -4.7563e-40, -1.0222e-40,
7.3457e-02, -8.2031e-02, -2.9504e-02,
-5.3420e-02, 4.9697e-02, 7.6779e-03,
2.1180e-02, 1.1069e-02, -1.1940e-02,
1.7302e-02, 9.9063e-02, 4.8847e-02,
4.9513e-02, 2.4240e-01, 2.7174e-01,
2.7487e-01, 1.9410e-01, 3.1165e-01,
-6.7532e-03, -1.1608e-01, -5.0876e-02,
1.2107e-01, 3.1073e-01, 7.1681e-02,
-1.1411e-01, -1.7902e-01, 7.8898e-02,
-2.0117e-02, 3.6394e-01, 1.4546e-01,
-8.0861e-03, -4.3956e-02, -1.3473e-01,
5.1519e-02, -3.1122e-01, -4.6847e-02,
5.0405e-02, -1.0611e-02, -1.0557e-01,
-4.4346e-02, -1.4505e-01, 5.3977e-02,
-2.6288e-01, 1.8247e-02, -1.1606e-01,
1.0706e-01, -9.3675e-02, 1.1757e-01,
-5.0440e-02, -1.1784e-01, -4.0599e-02,
1.9618e-01, 9.9370e-02, 8.2258e-02,
2.6762e-02, -5.0740e-02, -1.8302e-02,
5.3340e-02, 6.5710e-02, 6.1552e-03,
-7.2158e-02, -3.5563e-02, 8.2140e-02,
3.1534e-40, 3.6427e-40, 3.0437e-40,
4.2856e-41, -4.7870e-40, 5.6317e-40,
-2.4673e-40, -6.9736e-41, 8.1050e-41,
1.4544e-01, 8.2490e-02, -9.2349e-03,
2.6124e-01, 2.7494e-01, -5.4946e-02,
1.8233e-01, 1.2428e-01, -6.7498e-03,
9.7639e-02, -6.2085e-03, 4.8154e-02,
2.7379e-02, -1.8443e-01, 4.0402e-02,
1.8893e-03, -5.2282e-03, 6.7548e-03,
-1.6559e-01, 9.7901e-02, -1.1869e-01,
-2.1287e-01, 4.1023e-01, -9.7379e-02,
-1.3767e-03, -1.6343e-01, -9.5059e-02,
-1.3547e-01, 2.0094e-01, 1.0102e-01,
-2.1311e-01, -1.5088e-01, 1.8175e-01,
4.6946e-02, -1.3963e-01, 1.0220e-01,
1.7536e-01, -2.4758e-01, -1.1481e-02,
6.1596e-02, -4.0352e-01, -1.4348e-01,
3.1690e-02, 1.7240e-01, 7.0780e-02,
9.9953e-02, -1.4154e-01, -8.3038e-02,
1.4527e-01, -2.1430e-01, -7.5840e-02,
1.6146e-01, 3.7508e-02, 5.3833e-02,
1.6723e-01, 1.7113e-01, -4.8512e-02,
2.1319e-01, 4.7031e-01, 1.1570e-01,
2.0330e-01, 2.4636e-01, 6.9924e-02,
-2.1165e-40, -1.9259e-40, -5.0990e-41,
-7.1298e-42, -4.2590e-41, 3.1709e-40,
4.1065e-40, -4.2585e-41, 3.4243e-40,
-1.0338e-40, 4.6039e-40, -3.3818e-40,
-3.9589e-41, 5.9574e-40, -5.8014e-41,
1.4505e-41, -3.5326e-40, -3.9806e-40,
4.2423e-40, -1.7055e-40, -4.9666e-40,
2.2853e-40, -2.4684e-40, -1.3794e-40,
-5.6764e-40, -1.7905e-40, -5.8915e-40,
-1.4755e-27, -2.0405e-28, -4.8677e-30,
-7.1151e-28, -9.7603e-29, -3.5264e-30,
-2.7455e-29, -5.7734e-30, -2.8633e-31,
-5.9960e-06, -5.9595e-06, -5.8686e-06,
-6.0381e-06, -6.0191e-06, -5.9605e-06,
-5.9849e-06, -5.9981e-06, -5.9654e-06,
-4.8277e-22, -7.0529e-22, -8.7179e-22,
-4.6334e-22, -6.3505e-22, -8.8438e-22,
-3.3883e-22, -4.2421e-22, -5.9002e-22,
-2.9574e-40, 4.0860e-40, -1.5966e-40,
-6.7527e-41, 7.6661e-41, -5.9491e-40,
3.0843e-40, 8.1079e-41, -2.5140e-40,
-3.7315e-40, 9.4787e-41, 4.6794e-40,
1.9383e-40, 5.0336e-41, 3.0561e-40,
-5.4286e-40, 5.5999e-40, -4.6977e-40
}
,
{
-1.7778e-01, 5.2351e-03, 1.6035e-02,
-9.7482e-02, -1.1056e-02, -5.0999e-02,
1.7460e-01, -4.0005e-02, -5.0911e-02,
-9.3843e-02, 1.2640e-01, -1.5016e-02,
-5.2880e-01, 1.9469e-01, -9.0037e-02,
-8.9136e-02, 9.8632e-02, -1.5009e-01,
-1.8080e-01, 1.1396e-01, -2.6178e-02,
-1.6689e-02, 1.4132e-01, -6.7769e-03,
-2.1120e-02, 6.8616e-02, -7.8209e-02,
4.8237e-02, -2.5303e-02, 1.7882e-02,
-4.2852e-02, -1.5071e-02, -3.3818e-02,
1.3635e-01, 4.5330e-01, 2.1489e-01,
2.7362e-02, -7.4152e-02, 2.3185e-03,
1.8771e-01, -2.0827e-02, -7.5581e-02,
1.4675e-01, -6.5552e-02, 4.2292e-02,
1.3990e-01, -4.1598e-01, 2.1609e-03,
1.5997e-01, 1.1375e-01, -1.8272e-02,
1.9045e-02, -4.2702e-02, -2.5602e-02,
1.6432e-01, -1.2783e-01, -1.8285e-03,
2.9414e-01, 1.7401e-01, -2.6321e-01,
-1.0125e-01, 1.3565e-01, 1.5894e-02,
-3.7351e-40, 6.3010e-40, -1.2071e-40,
-4.6380e-40, 1.8442e-40, -3.5994e-40,
-2.1459e-40, -4.3455e-40, -6.1978e-41,
-2.3638e-40, -4.6965e-40, -3.4232e-40,
-1.6517e-40, 4.7178e-40, -1.6757e-40,
6.7890e-41, -4.3000e-40, 1.8323e-40,
4.5416e-40, -2.9010e-40, -1.5200e-40,
-3.5533e-40, -8.7351e-41, 6.5595e-42,
5.1625e-40, -6.0418e-40, -2.7846e-40,
-2.1861e-10, -2.2422e-10, -2.1298e-10,
-2.2653e-10, -2.3500e-10, -2.2512e-10,
-2.1802e-10, -2.2681e-10, -2.1608e-10,
-3.2862e-40, 3.4241e-40, -1.3264e-40,
2.8762e-40, 1.3843e-40, 3.0949e-40,
-3.7702e-40, 2.6194e-40, 2.1451e-40,
-3.2283e-40, -5.5487e-40, 5.8744e-40,
1.6124e-40, 3.3512e-40, 3.1454e-40,
-3.5417e-40, -5.7692e-40, 5.5184e-40,
3.5641e-40, -4.3187e-40, -3.5314e-40,
4.9246e-40, 5.9593e-40, 8.3132e-41,
-2.3841e-40, -5.6196e-40, -3.2230e-41,
4.3824e-40, -3.8344e-40, -9.9086e-42,
-2.9323e-40, 2.1916e-40, 4.4739e-40,
5.6837e-41, 5.1796e-41, -2.4338e-40,
-2.2853e-40, -3.8920e-40, 6.1587e-40,
-2.9474e-41, 4.6214e-40, -3.6292e-40,
-1.4928e-40, -3.6708e-41, 5.2020e-40,
-1.2983e-12, -2.6539e-12, -1.9817e-12,
-6.5613e-12, -1.0255e-11, -6.6919e-12,
-8.3217e-12, -1.7832e-11, -1.1086e-11,
-4.9138e-40, -9.0061e-42, 4.6251e-40,
-2.9970e-41, -2.5468e-40, -3.5660e-40,
2.5450e-40, -9.5634e-38, -3.2369e-32,
-1.0233e-06, -8.2108e-07, -1.1668e-06,
-5.9592e-07, -3.9529e-07, -5.7435e-07,
-6.0253e-07, -3.8785e-07, -4.9365e-07,
-8.9372e-37, -2.1590e-36, -2.1060e-40,
-1.5666e-35, -1.1466e-38, -2.3366e-40,
-5.4077e-38, 5.0487e-40, -3.3736e-40,
-1.5357e-13, -8.4607e-14, -1.9206e-16,
-5.5373e-13, -3.0787e-13, -1.0513e-15,
-1.0468e-13, -8.6069e-14, -2.2453e-16,
-4.7501e-14, -1.3426e-13, -1.1133e-13,
-1.3801e-14, -2.4024e-14, -3.5120e-14,
-1.9817e-17, -1.3229e-17, -3.2854e-17,
-1.4365e-18, -4.1143e-15, -9.2614e-14,
-1.1174e-19, -1.6235e-15, -1.5600e-13,
-1.2643e-21, -3.9578e-17, -1.2038e-14,
-2.9789e-40, -4.6452e-40, 1.5649e-40,
-1.8445e-40, -5.2942e-40, 2.5130e-40,
6.2269e-40, 3.9166e-41, -2.4197e-40,
9.0835e-02, -5.2035e-03, -2.5980e-02,
-1.0090e-01, -7.4167e-02, 1.3364e-01,
1.0302e-01, -1.5250e-01, 1.2417e-01,
4.7205e-02, -2.3839e-01, -1.4983e-02,
5.6824e-02, -1.8259e-02, 9.6426e-02,
5.9740e-03, -1.4198e-01, -2.1076e-01,
-1.5837e-01, 6.4749e-02, -2.1417e-01,
-3.4048e-02, 4.9638e-01, 2.0984e-03,
-1.4335e-01, 4.8295e-02, -9.2209e-02,
1.9450e-01, -1.3603e-01, 1.2008e-01,
1.6803e-01, 5.6805e-02, 1.1518e-01,
5.9320e-02, -3.8200e-02, -1.1340e-01,
-8.6877e-02, 1.1533e-01, -4.9870e-02,
-7.2811e-03, 2.5730e-01, -1.8536e-01,
-6.4965e-02, 1.0364e-01, 1.3706e-02,
4.6974e-02, -1.0049e-01, -1.7460e-01,
-1.7910e-01, 3.0771e-01, -2.5757e-01,
-2.2846e-02, -3.7491e-03, -5.2171e-03,
-4.7762e-02, -4.7776e-02, 5.1125e-01,
-2.0210e-01, 6.4815e-02, -6.1606e-02,
7.3686e-04, -1.6226e-01, -3.0327e-02,
5.6501e-40, 5.2828e-40, -5.9773e-40,
-4.3530e-40, -1.1658e-40, 4.9705e-41,
4.8101e-40, 5.0236e-40, 2.0476e-40,
-1.1412e-01, 1.3391e-01, -1.2279e-01,
1.4370e-01, 3.7617e-01, 7.1407e-02,
6.9661e-02, 3.1963e-01, -1.7089e-02,
-4.7530e-02, 6.5411e-02, -2.4915e-02,
3.3429e-02, -1.3899e-01, -3.3875e-02,
-1.9261e-02, -1.3162e-01, 1.1415e-01,
2.0599e-02, -3.8667e-02, -7.2190e-02,
-2.1112e-01, -1.6525e-01, -2.3430e-02,
-1.2287e-02, -2.6637e-01, 1.0859e-03,
-2.8564e-02, 4.8846e-02, 4.2412e-02,
1.4632e-01, 1.5974e-02, -1.0699e-01,
5.5661e-02, -2.0952e-01, 2.4151e-02,
-2.3510e-02, -5.0570e-02, 1.0799e-01,
1.7495e-01, -1.5788e-03, -1.6447e-02,
7.7642e-02, -9.3888e-02, 1.3891e-03,
2.2658e-02, 1.4058e-01, 1.0639e-01,
-5.5626e-02, -3.0794e-01, -5.7160e-02,
1.0874e-01, -8.3907e-02, 4.2106e-02,
1.7688e-02, 1.8090e-01, -2.1718e-03,
-1.0659e-02, -2.1302e-01, 1.0056e-01,
-6.0693e-02, -2.3624e-02, 6.3688e-03,
-2.7320e-40, -1.3336e-40, 2.4202e-41,
-7.1225e-41, 1.2848e-40, 1.5426e-40,
-4.2798e-40, 6.5075e-41, 6.2629e-40,
1.6905e-01, -1.7379e-01, -2.1360e-02,
-2.9396e-01, 1.1782e-01, 7.9111e-02,
-6.4767e-03, -1.9949e-01, 5.4243e-02,
-3.2753e-02, -1.5810e-01, 5.2257e-02,
-1.8133e-02, 2.0548e-01, -2.8071e-01,
-5.3725e-02, 8.4067e-02, -7.4639e-02,
8.9137e-02, -2.3078e-01, -1.9626e-01,
3.1276e-01, 1.5332e-01, -1.9590e-01,
-1.8318e-02, 6.8460e-02, 9.1476e-03,
8.2398e-02, 8.5883e-03, 7.6830e-02,
-1.4580e-01, 4.6253e-01, -3.1900e-01,
-1.1051e-01, 6.3807e-02, -2.5130e-02,
-1.2029e-01, -3.8982e-03, 2.1654e-02,
-3.2017e-01, 2.0265e-01, -1.7311e-01,
-1.3229e-02, 1.3805e-01, -6.2689e-02,
-3.6619e-02, -1.9366e-01, 2.7177e-01,
5.5937e-02, 7.9713e-02, -2.3872e-01,
-3.9690e-02, 2.2914e-02, -1.7779e-02,
1.1110e-01, 1.6618e-01, 3.6139e-01,
7.9777e-02, 4.3655e-01, 3.0597e-01,
-5.5125e-02, 6.1229e-02, 1.2414e-01,
2.1644e-40, 7.2343e-41, 5.5580e-40,
-4.3927e-40, 5.0561e-40, -1.5560e-41,
-3.2783e-40, -8.8219e-41, 5.4415e-40,
-6.7176e-02, -3.4930e-02, -2.7087e-02,
1.0489e-01, 2.1178e-01, -1.6752e-01,
-1.2627e-01, -2.4207e-01, -7.4667e-02,
-3.1470e-02, -1.3365e-02, 8.7742e-02,
-2.2809e-02, -4.7991e-01, 2.4740e-02,
6.4418e-02, 3.4818e-02, -2.9275e-01,
-2.8830e-01, -7.0458e-02, 7.8922e-02,
-1.4436e-01, 4.1068e-02, 6.2896e-02,
4.1061e-03, 2.1844e-01, 9.0488e-02,
-1.1085e-01, 8.3761e-02, 3.2634e-02,
3.2470e-01, -2.7760e-01, 4.1235e-02,
8.6625e-02, 2.6816e-01, -1.3560e-01,
3.8789e-01, 3.2406e-01, 1.0631e-01,
7.5131e-02, -2.0206e-01, 1.3027e-01,
4.0382e-02, 2.4350e-01, -3.6042e-03,
-1.0063e-01, 1.9418e-01, -7.7039e-02,
9.4531e-03, 7.1605e-02, 1.4004e-01,
-2.0591e-02, 4.5944e-02, -2.6721e-03,
-3.4665e-03, 2.2560e-01, -8.2930e-02,
-1.5507e-01, 2.7206e-01, -2.8665e-02,
-3.4909e-03, 1.7696e-02, -8.5492e-02,
2.1541e-40, -3.3029e-40, 1.7678e-40,
-3.9857e-40, -1.1965e-40, -8.6754e-41,
-4.0721e-40, 2.2073e-41, 4.2728e-40,
-1.0496e-02, 5.4120e-02, -1.6498e-02,
-5.9387e-02, 2.3757e-01, -8.0381e-02,
2.3739e-02, -1.3715e-01, -3.0906e-02,
-8.5760e-03, 2.4518e-02, -6.9090e-02,
2.1623e-02, 8.9641e-02, 9.9031e-02,
-1.0052e-02, 4.6506e-02, -1.5756e-01,
8.5003e-02, -3.6434e-03, 1.3816e-02,
9.0532e-02, 2.3661e-01, 1.8077e-01,
2.8120e-02, 4.3753e-02, 2.2981e-02,
3.5830e-02, 5.7995e-02, -5.6879e-03,
3.7708e-02, -2.6373e-01, 2.0886e-01,
-4.0632e-02, 1.6891e-01, -6.8996e-02,
-1.1972e-01, -4.3628e-02, 2.0278e-02,
-1.4818e-01, 4.0844e-02, 1.5917e-01,
-4.5684e-02, 1.4075e-01, -2.0784e-02,
-1.1533e-03, -2.7897e-01, -8.8707e-02,
-1.7907e-02, 1.8400e-01, 1.1026e-01,
-2.3183e-03, 6.3875e-02, -4.2394e-03,
3.2021e-02, -8.8955e-02, -2.2298e-02,
8.1353e-02, 3.3079e-01, -2.0616e-01,
-3.5802e-02, 4.9804e-02, -9.2712e-02,
-1.5940e-07, -1.6158e-07, -1.5812e-07,
-1.6273e-07, -1.6555e-07, -1.6260e-07,
-1.5867e-07, -1.6192e-07, -1.5975e-07
}
,
{
-1.5080e-02, 1.1294e-01, 7.1187e-02,
1.1628e-02, -8.4938e-01, 8.5457e-02,
-3.9642e-02, -2.3879e-02, 1.0029e-02,
2.6648e-40, 9.1590e-41, 3.3285e-40,
-3.3445e-40, -2.5194e-40, -2.0946e-40,
3.6800e-40, -1.1584e-40, 6.2195e-40,
-1.3560e-41, -8.0151e-41, 4.4048e-40,
-4.1209e-40, 2.7411e-40, 3.2419e-40,
5.8333e-40, 1.1503e-40, -5.0783e-40,
-5.5301e-02, -2.4971e-02, 4.9251e-02,
-2.5589e-01, 1.6560e-01, -8.0956e-02,
4.0518e-01, 3.1320e-02, -1.4262e-01,
1.2250e-02, 5.1989e-02, 3.0706e-03,
-7.9534e-02, -1.9801e-01, -2.7791e-02,
2.1768e-01, 6.9978e-02, -4.2325e-02,
-1.9165e-02, -2.1179e-02, -2.1558e-02,
3.6816e-01, -5.2929e-02, 9.5790e-02,
2.8095e-01, -1.4731e-01, 3.4182e-02,
2.3702e-02, 4.0764e-02, 3.5767e-02,
-8.4586e-02, 1.9025e-01, -1.6794e-01,
-1.0273e-02, 3.2259e-01, -1.5841e-01,
2.6794e-01, 5.2084e-02, 1.2761e-02,
-1.1169e-01, -1.7808e-01, 1.1363e-01,
-1.3808e-01, -1.7764e-02, -1.7420e-02,
1.5840e-02, -2.3405e-01, 7.6361e-03,
-6.6082e-02, 7.9778e-02, -2.0423e-01,
-1.9594e-02, -6.3370e-02, 3.3351e-02,
-2.0396e-40, -3.0207e-40, -3.2364e-40,
2.3575e-40, 5.8301e-41, -3.7432e-40,
-3.6291e-40, 3.3441e-40, 1.4574e-40,
-4.3792e-40, -2.5814e-40, -3.4986e-41,
-3.4920e-40, -4.4757e-40, 3.2192e-40,
4.7222e-40, -7.3197e-41, -3.4635e-40,
5.1495e-02, 7.8843e-02, 4.2243e-02,
-2.1245e-01, 1.9568e-01, 7.9369e-03,
2.2795e-02, 2.2801e-02, 7.6895e-02,
3.0044e-01, -1.4041e-01, -2.3677e-02,
-1.1656e-01, -7.5113e-02, 1.0625e-02,
-1.2133e-02, 5.0658e-02, -7.2944e-02,
-3.3652e-02, -2.0452e-01, -4.1048e-02,
2.8531e-01, 1.2116e-01, -2.1526e-02,
-2.4564e-01, -4.1870e-02, -5.5819e-02,
-2.3157e-01, -2.5594e-02, 1.1154e-01,
2.1234e-01, 3.2762e-01, -2.9000e-01,
1.8591e-02, -5.9820e-02, -9.0807e-02,
-3.0027e-01, -1.8370e-01, 1.2086e-02,
2.1178e-02, 2.9559e-01, 1.2966e-01,
6.8542e-02, 7.7710e-03, -6.0304e-02,
3.3019e-03, -1.9135e-02, 9.3227e-03,
-9.9003e-03, -1.0101e-01, -3.3513e-01,
-8.4091e-03, -1.5918e-02, -3.4323e-02,
3.8770e-40, -2.8639e-40, 4.6953e-40,
4.2631e-40, 6.2568e-41, -5.3500e-40,
-2.1987e-40, 1.3435e-40, 4.4101e-40,
-3.9973e-40, 6.3046e-40, 1.6046e-40,
4.4338e-40, 1.6940e-41, 4.1598e-40,
2.6132e-40, -2.9888e-40, -7.5708e-41,
-1.5991e-02, 8.2749e-02, -6.3776e-02,
-3.2220e-03, 4.1443e-02, -8.1219e-02,
-1.1231e-01, 6.7586e-01, -1.7600e-01,
-4.0371e-02, -7.9044e-02, 1.2451e-01,
4.1907e-02, -8.8159e-02, -1.1229e-01,
-4.0654e-03, -4.4087e-03, 1.2942e-01,
9.3318e-03, -6.5085e-02, 1.0165e-02,
-2.8758e-02, -4.9997e-02, 4.6069e-02,
4.2107e-04, 2.1718e-01, 3.1080e-03,
-9.1277e-03, -2.8568e-02, 1.6202e-02,
-8.2490e-03, 1.2888e-01, -1.3159e-01,
1.6065e-02, 4.0143e-02, 2.7043e-01,
-3.4809e-02, -8.1302e-03, 6.0786e-02,
5.1845e-02, 4.6995e-01, -1.0392e-02,
2.3359e-02, -1.8364e-01, -3.7343e-01,
-8.2996e-02, 9.7724e-02, -6.1012e-02,
2.8225e-02, 8.8706e-02, 1.3443e-02,
3.7515e-03, 1.7772e-02, 6.5945e-03,
-7.3847e-12, -7.5629e-12, -6.9337e-12,
-7.6292e-12, -7.8624e-12, -7.2877e-12,
-7.0582e-12, -7.3197e-12, -6.8467e-12,
1.5445e-11, 2.0754e-11, 2.0524e-11,
2.1239e-11, 2.5909e-11, 2.5983e-11,
2.0986e-11, 2.5190e-11, 2.2478e-11,
-4.7164e-02, -2.4754e-02, -1.8256e-02,
1.0526e-01, -4.6010e-03, -2.2784e-02,
-5.2028e-02, -1.6408e-01, 7.9112e-03,
-8.1863e-02, 4.2772e-02, -9.9446e-04,
-5.5521e-02, -1.1264e-01, -4.5782e-02,
-1.1026e-01, 2.1443e-02, -4.5120e-02,
-1.4141e-02, -2.8116e-03, 2.6990e-02,
-2.0201e-01, 4.3214e-01, 2.9373e-02,
-2.1768e-01, -2.7230e-02, 5.5396e-03,
5.0196e-02, 1.5506e-01, -5.7328e-02,
4.8323e-02, 3.8243e-02, -1.3533e-01,
-9.8862e-03, -5.6971e-02, -7.1500e-02,
1.0272e-01, 7.4686e-02, 7.4732e-02,
8.3744e-02, 1.5834e-01, 2.9221e-02,
6.5641e-02, 7.7697e-02, 3.5746e-02,
-1.6614e-01, -2.3128e-01, 4.4691e-02,
6.3546e-02, -3.8105e-01, 3.4110e-02,
-3.5022e-02, -2.3782e-02, 2.8664e-02,
-3.8813e-41, -2.8626e-40, -9.0218e-41,
4.1216e-40, -4.4215e-40, 3.1198e-40,
5.6281e-40, 2.0477e-40, 2.7797e-40,
-4.4903e-40, -6.2574e-41, 4.9971e-40,
5.0135e-40, -3.1945e-40, -2.4694e-40,
2.6587e-40, -4.9583e-40, -4.9771e-40,
3.7139e-02, 5.2936e-04, -2.3658e-02,
-3.6199e-01, -5.1912e-02, -5.1969e-02,
2.5415e-01, 2.4109e-01, 9.8721e-03,
5.5061e-02, -4.7469e-02, 3.0045e-02,
2.1565e-03, -2.3866e-02, -2.3496e-02,
6.0892e-02, -4.6442e-04, -5.0200e-02,
5.4971e-02, -1.7234e-02, -3.2759e-03,
4.8225e-01, -1.1234e-01, 3.8257e-02,
5.2105e-02, -2.8473e-03, -1.0355e-02,
-9.5654e-03, -1.8751e-01, 1.7079e-02,
7.0133e-02, 7.6363e-01, -8.7388e-02,
-5.6536e-02, -1.9152e-01, -1.6043e-01,
2.0359e-01, 7.4214e-02, 3.1970e-02,
-1.8199e-01, -1.9386e-01, -2.5967e-03,
-3.4609e-02, 3.3870e-02, 5.8835e-02,
8.8220e-02, 9.9265e-02, 7.1240e-03,
-9.1395e-02, -3.1699e-01, -2.9120e-02,
-1.8436e-02, -2.1432e-02, -4.5465e-02,
-3.2013e-40, 3.2019e-40, 4.8747e-41,
2.6585e-40, 6.1463e-40, 1.4176e-40,
-1.5286e-40, 3.0543e-40, 7.2032e-41,
-6.0758e-40, -3.6200e-40, 1.2123e-40,
1.3627e-40, 3.2983e-40, 3.6171e-40,
-4.2148e-40, 1.1102e-40, 3.2714e-40,
-3.4763e-02, -3.1632e-02, 3.0044e-02,
-2.0935e-01, 1.3533e-01, -9.1607e-03,
-1.5931e-01, 1.0771e-01, -6.6518e-02,
2.4399e-02, 2.2923e-03, 5.1575e-02,
-1.4154e-01, -1.0013e-02, -7.5696e-02,
1.0849e-01, 1.2575e-01, -7.3161e-02,
-1.5217e-02, -2.7659e-02, -3.1401e-02,
3.4960e-01, 7.2390e-02, 2.0722e-02,
3.9440e-01, 9.1821e-04, 1.7842e-02,
-1.5670e-02, 5.3020e-02, 6.0536e-02,
-1.8853e-01, 2.7532e-01, -1.9681e-01,
8.3258e-02, 9.4285e-02, -1.2695e-01,
2.7593e-01, 1.1456e-01, 1.6048e-02,
-5.1675e-01, 1.4727e-01, 7.5170e-02,
-6.9143e-02, -9.2948e-02, 3.4687e-02,
1.4128e-02, -7.9962e-02, 8.0446e-02,
3.7011e-02, -1.3400e-01, -2.0725e-02,
-6.4981e-03, 7.0724e-02, 6.6167e-02,
-4.5940e-41, 2.5437e-40, -3.3111e-40,
5.9661e-40, 6.2521e-40, 5.6418e-40,
1.9187e-40, -5.8872e-40, 5.5747e-40,
-1.6402e-11, -2.2097e-11, -1.7224e-11,
-2.2755e-11, -2.9977e-11, -2.1231e-11,
-1.3688e-11, -1.7479e-11, -1.3081e-11,
6.4790e-03, -3.8464e-03, -1.0008e-02,
-2.6001e-02, -7.9483e-02, 3.3711e-02,
2.6659e-03, -3.2634e-02, 1.0767e-02,
4.9939e-03, 1.4064e-02, -3.4294e-02,
4.8529e-02, 6.3386e-01, -3.6805e-02,
-1.3703e-01, 2.5878e-02, -4.8617e-02,
3.2186e-02, 6.6382e-02, 1.9305e-02,
7.0196e-02, -1.6892e-01, -2.8980e-02,
9.7762e-02, 9.7998e-03, -5.1620e-03,
5.0753e-02, -4.5071e-03, -3.9836e-02,
-6.0381e-02, -9.2016e-02, 9.5433e-02,
-1.0045e-02, 8.7955e-03, 4.9429e-02,
-1.8363e-02, -1.1912e-01, 9.7347e-03,
-1.5657e-01, -2.1035e-01, -4.9737e-02,
-3.0025e-02, -6.4959e-02, -5.6107e-02,
3.2927e-40, 5.7263e-40, 6.2889e-40,
-6.0716e-39, 5.3050e-41, -1.7152e-40,
-3.2493e-38, -1.5841e-40, -1.9343e-40,
4.9763e-40, 5.5142e-40, -4.3462e-40,
-2.2649e-40, 1.4321e-40, -2.6779e-40,
2.3072e-41, 5.4080e-40, -6.4200e-41,
2.2827e-40, -5.4515e-41, -4.1768e-40,
3.9033e-40, 6.1988e-41, 5.9877e-40,
-4.3355e-41, -5.1088e-40, 5.9845e-40,
-4.8238e-40, -1.8586e-40, 4.8699e-40,
-9.7225e-41, 4.3387e-40, -4.3683e-40,
-7.9278e-41, -5.3614e-40, 2.1911e-40,
-3.3982e-40, -5.3335e-40, 3.8540e-40,
1.9051e-40, -2.0840e-40, 2.2868e-40,
-3.5020e-40, -3.4276e-40, 2.7395e-42,
3.9197e-40, 6.1843e-40, -1.5888e-40,
4.3516e-40, -6.1852e-40, -5.3692e-40,
-4.3268e-40, 3.5154e-40, 3.4477e-40,
-4.8414e-40, 2.2647e-40, -2.5591e-40,
4.6326e-40, -3.0462e-40, 4.7817e-40,
-4.9853e-40, -5.3425e-40, -2.9848e-40,
-1.3329e-07, -1.3784e-07, -1.3049e-07,
-1.3376e-07, -1.3905e-07, -1.3204e-07,
-1.2479e-07, -1.2994e-07, -1.2410e-07
}
,
{
-2.5964e-02, 2.9670e-02, 1.2100e-01,
-3.0371e-02, -1.5277e-02, -1.8589e-01,
-1.8650e-02, -1.2852e-01, -6.6297e-02,
9.7934e-04, -5.1835e-02, -1.0278e-03,
-1.2336e-02, 2.2130e-01, -1.2373e-01,
-2.3451e-02, 3.4217e-02, -1.0118e-02,
-3.0558e-01, -8.5390e-02, -1.4360e-02,
1.2473e-01, -1.7005e-02, -3.6816e-02,
-8.9125e-02, -6.1400e-02, -2.0623e-02,
1.3736e-02, 1.2441e-02, -4.3491e-02,
6.4806e-02, 3.7012e-01, 3.8064e-02,
-1.3731e-02, -2.4859e-01, -2.5450e-01,
-6.5111e-03, -1.4271e-01, -5.0481e-02,
5.3240e-02, -3.4843e-02, -2.2703e-02,
3.7414e-02, 1.0334e-01, -7.2237e-02,
1.4216e-02, 3.4231e-02, -2.0890e-02,
2.7879e-02, 1.3717e-01, 4.5864e-03,
3.0460e-03, -1.1734e-01, 4.4439e-02,
6.4825e-03, 1.6324e-02, 1.4928e-02,
-8.8420e-02, -1.0779e-01, -9.0653e-02,
3.1086e-02, -2.9067e-02, -8.8488e-02,
-1.6779e-40, -6.3646e-41, -6.2486e-40,
2.3154e-40, 2.8049e-40, 3.7718e-40,
-3.3950e-40, -3.1501e-40, 5.8709e-40,
2.1435e-02, -4.3732e-01, 1.5520e-02,
3.4080e-02, 1.9912e-01, -8.1413e-02,
-3.2816e-02, 5.7844e-02, 8.9258e-03,
-1.1662e-02, -1.1721e-02, 4.3033e-02,
5.2135e-02, -2.2503e-01, 2.3941e-01,
3.8400e-02, 1.8075e-01, -1.4776e-01,
2.6784e-01, 2.2817e-01, -3.0553e-03,
-6.7998e-02, -1.2050e-01, 1.4714e-02,
2.4045e-02, -1.4329e-02, -1.6705e-02,
-1.1421e-02, 4.2139e-02, 4.2944e-02,
1.8809e-02, -2.5221e-01, 9.7562e-02,
-4.1600e-02, 4.0069e-03, 7.5290e-02,
-2.0092e-02, 2.3537e-01, 2.4356e-02,
3.1957e-02, -4.8573e-02, 2.9379e-02,
6.4562e-03, -1.1527e-01, -9.1223e-02,
-2.3432e-02, 5.2881e-02, -7.3239e-02,
-3.7048e-02, -2.1481e-01, 5.9801e-05,
-4.2646e-02, -1.8366e-02, -1.0681e-01,
-1.3366e-01, -1.7123e-01, -3.5629e-02,
1.1216e-01, 1.1479e-01, 9.5297e-02,
2.4728e-02, -7.3135e-03, -3.4373e-02,
-2.3917e-40, -4.1869e-41, 3.7775e-41,
2.8931e-40, -9.4850e-41, 2.5694e-40,
3.3549e-40, -2.4334e-40, -5.5933e-41,
-2.0900e-02, 2.1203e-02, -4.7169e-02,
2.3632e-02, -7.1148e-01, 4.9722e-02,
-7.8963e-03, 5.0689e-02, 2.2619e-02,
-4.7364e-03, 3.2037e-02, 1.1004e-02,
-4.3001e-03, 2.5245e-01, 5.9112e-02,
2.8932e-02, -1.1267e-01, -2.3739e-01,
-6.5379e-02, 5.2462e-03, -1.6807e-02,
1.0960e-01, 1.7943e-01, -6.3043e-03,
9.3102e-02, 7.3103e-02, 2.5259e-02,
5.6835e-02, 4.0467e-02, 2.5447e-03,
9.4599e-02, 2.5222e-01, 6.9855e-02,
4.4758e-02, 1.8073e-01, 1.5075e-01,
2.0329e-02, -4.9412e-02, 2.0663e-02,
-7.1648e-03, 1.4986e-01, 2.1212e-01,
2.7657e-02, -6.8660e-02, 1.7321e-02,
1.0629e-02, -1.0722e-02, 2.8247e-02,
-1.1303e-02, 1.0076e-01, -4.0592e-01,
2.6744e-02, 7.3650e-02, 5.7966e-02,
2.8122e-02, -7.5961e-02, -9.4797e-03,
-1.3010e-01, -5.4184e-01, -1.3619e-01,
-1.8661e-03, -1.4357e-01, 7.9520e-03,
-1.3538e-09, -1.6580e-09, -1.7289e-09,
-1.2386e-09, -1.5132e-09, -1.5987e-09,
-1.1157e-09, -1.3420e-09, -1.4090e-09,
1.5441e-02, -1.8142e-01, -8.6802e-02,
-4.0983e-02, 2.4351e-01, -5.8181e-02,
-2.9568e-02, 3.9561e-03, 3.4181e-02,
-2.9210e-02, 2.5403e-02, 9.1331e-02,
2.3621e-02, 2.3954e-01, 5.2487e-02,
1.6509e-02, -6.2728e-02, 1.3448e-02,
1.2855e-01, 1.1892e-02, -1.3356e-02,
1.0810e-01, 1.6760e-01, -3.2040e-02,
6.2209e-02, 4.0682e-02, 3.9772e-02,
-6.1711e-03, 5.0588e-02, -1.0811e-01,
1.5744e-02, 1.6091e-01, -6.1739e-02,
-5.6717e-02, -1.0657e-02, -3.7943e-02,
-4.0595e-02, 8.0149e-02, 2.0216e-02,
3.8838e-02, -6.3586e-01, 2.3785e-01,
-1.0472e-02, 6.3899e-02, -8.2184e-02,
-1.9137e-02, 8.1163e-02, 6.7065e-02,
-2.2377e-03, 1.1860e-01, 3.4122e-02,
1.0501e-02, 2.9851e-02, 7.5841e-02,
5.8970e-02, -1.2188e-01, 7.7982e-02,
-2.6516e-02, -4.1289e-01, 2.1471e-02,
3.3957e-02, 3.5762e-02, -5.7857e-02,
-2.7357e-30, -3.4780e-30, -3.0306e-30,
-1.5188e-30, -1.9888e-30, -1.8755e-30,
-7.7431e-31, -9.7571e-31, -9.7402e-31,
-1.8497e-02, -2.4554e-02, 1.4428e-01,
1.4217e-02, -2.3647e-01, 8.4097e-02,
-1.0251e-02, -4.2137e-03, 6.0831e-03,
1.7742e-03, 2.1487e-02, 3.3147e-02,
-1.0971e-02, 3.0162e-01, 5.2391e-02,
1.8341e-02, -1.3390e-01, 9.4303e-02,
-1.5685e-01, 9.8434e-02, -1.2502e-03,
3.1370e-01, -2.8879e-02, 2.6313e-03,
1.7548e-02, 6.6741e-03, -1.7681e-03,
5.2062e-02, 6.6914e-02, 7.5256e-03,
2.4966e-02, 2.8081e-01, 2.9815e-02,
2.2375e-02, 1.4257e-03, -7.4702e-02,
1.5372e-02, 3.9587e-02, 4.6909e-02,
-2.2911e-02, -1.4568e-01, -3.8964e-01,
2.2850e-02, -4.2297e-02, 6.5736e-02,
-6.9905e-03, -6.3972e-02, -1.8430e-01,
4.4453e-03, 2.0687e-01, 3.0032e-01,
1.7243e-02, 9.8548e-03, -9.7476e-02,
-7.9682e-04, -2.1199e-01, -4.3461e-02,
-4.2929e-02, -2.8227e-01, 2.8997e-02,
-1.8741e-03, 1.1166e-02, 1.8381e-03,
-5.6725e-16, -1.0368e-15, -1.1480e-15,
-5.5537e-16, -9.9929e-16, -1.1499e-15,
-3.8787e-16, -6.4019e-16, -7.7595e-16,
4.4505e-02, 8.8803e-02, 1.1384e-02,
-3.9434e-02, 1.9319e-01, -1.2016e-02,
-4.6072e-02, 1.1769e-01, 7.4816e-03,
-3.7856e-02, -1.7147e-02, 1.5984e-01,
-2.6459e-02, 1.7469e-01, 1.2584e-01,
1.6387e-02, 1.7370e-01, -1.7350e-01,
-3.0008e-01, 2.1485e-01, -5.4302e-02,
5.7724e-02, 3.2168e-01, -2.5261e-02,
6.9277e-02, 7.5035e-02, 6.3485e-02,
-1.1688e-01, 2.6068e-02, -1.3490e-01,
-1.6085e-01, 1.9409e-01, 1.1434e-01,
-7.3819e-02, -7.7880e-02, 7.3699e-03,
-9.9972e-02, 1.3554e-01, 2.1656e-02,
-8.8303e-02, 5.4435e-01, -4.0582e-02,
-3.4805e-02, -1.5291e-01, -3.6917e-02,
-3.4377e-02, -3.3086e-02, -9.5097e-02,
-7.4538e-03, 2.2545e-01, -2.6380e-02,
1.4440e-02, 1.3205e-01, 1.6164e-01,
9.2164e-02, -8.4307e-02, 7.8922e-02,
1.2519e-01, -6.1809e-01, -1.0895e-01,
6.2744e-02, -4.4951e-02, -3.2548e-02,
-2.5422e-21, -6.3849e-21, -9.5560e-21,
-1.9248e-21, -4.7107e-21, -6.4244e-21,
-1.4638e-21, -3.1947e-21, -3.7663e-21,
-8.6113e-03, -7.0987e-02, 5.8265e-02,
-1.3148e-02, 5.6371e-01, 5.0580e-02,
1.1741e-02, -3.5614e-02, -6.1265e-02,
1.4758e-03, 3.3349e-02, -1.0867e-02,
-4.0234e-02, 1.9894e-01, 1.3972e-01,
-1.9167e-02, -4.1723e-02, -1.9982e-01,
-3.0756e-01, 2.6284e-02, -1.9058e-02,
-7.9349e-04, 1.2644e-01, 2.9567e-02,
-3.9274e-02, 1.1030e-02, -9.4885e-03,
1.3541e-02, 1.7044e-01, 8.9626e-02,
6.6814e-02, 2.6430e-01, 1.7409e-01,
-6.1034e-04, 1.7569e-02, 1.3090e-01,
-4.1941e-03, 8.9599e-02, -3.3684e-02,
-1.1310e-02, -4.3731e-01, 5.7177e-02,
-4.5718e-04, 1.0175e-01, 4.1211e-02,
2.9756e-02, -1.1601e-01, -7.3171e-02,
2.7939e-02, 2.1334e-01, -4.0210e-01,
-8.6847e-03, 8.1829e-02, 4.4225e-02,
-1.1411e-01, -1.7697e-01, -5.8087e-02,
7.9613e-02, -4.2814e-01, -1.0814e-01,
-3.0610e-02, 1.1342e-03, -2.2322e-03,
-1.1254e-10, -1.4207e-10, -1.5402e-10,
-9.9123e-11, -1.2394e-10, -1.3338e-10,
-8.8840e-11, -1.0857e-10, -1.1463e-10,
3.0283e-02, -5.6191e-02, -1.0447e-01,
-1.4578e-02, -2.8745e-01, 1.9089e-01,
-2.7251e-02, 9.8069e-02, -1.4580e-02,
-3.0276e-02, 1.4366e-02, 2.6363e-02,
-8.4962e-02, 7.8998e-02, -4.7717e-02,
-3.2004e-02, -2.1579e-02, 1.1247e-02,
1.3895e-01, -3.3900e-01, 7.7998e-03,
2.4769e-01, -1.8506e-01, -2.3116e-03,
3.1361e-02, -1.1718e-02, -1.8286e-02,
-1.3020e-01, 1.4334e-01, -5.5700e-02,
-3.5386e-02, 1.0992e-01, -8.0235e-02,
-5.8978e-03, 7.7039e-02, -7.4619e-02,
-8.1603e-02, 1.2982e-01, -7.3193e-02,
-6.1469e-02, 1.7131e-01, 4.0255e-01,
-6.4582e-03, -8.2741e-02, -2.2220e-02,
1.6876e-02, -3.2590e-02, 5.5645e-02,
2.5231e-02, 2.9984e-01, -3.6995e-02,
9.3322e-03, 2.0758e-01, -2.1986e-02,
-4.9568e-02, 2.1857e-03, 8.6127e-02,
8.6593e-02, -5.8134e-01, 3.4507e-01,
4.8855e-02, -1.0506e-01, 4.1584e-02,
2.5428e-40, -4.4558e-40, -2.2090e-40,
-2.9727e-40, -4.8454e-40, 3.0397e-40,
1.1696e-40, -3.3028e-40, -2.2959e-40
}
};
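// HDNL1biasL below appears to hold the per-layer bias vectors of the
// HDNL1 network: 8 layers x 8 feature channels, judging from the [8][8]
// shape and the kernel blocks above. This reading is inferred from the
// declarations alone; it is not stated anywhere in the source.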
static __device__ __constant__ const float HDNL1biasL[8][8] =
{
{
-3.1869e-08, -3.8279e-01, -6.3693e-05, -5.9054e-02, 9.3774e-04, -2.9944e-02, -1.1156e-03, -7.5635e-02
}
,
{
-1.7701e-01, -1.3417e-06, -3.0706e-40, -1.9022e-06, -1.2965e-02, -6.6444e-40, 1.4699e-02, 2.6082e-02
}
,
{
-3.7577e-07, 4.4550e-03, -8.1266e-04, 3.2408e-01, -1.1321e-07, -1.8907e-23, -1.9770e-25, -3.2394e-02
}
,
{
-2.1525e-14, -1.4130e-02, -1.9410e-02, -1.8703e-02, -2.9177e-02, -4.0635e-02, 7.8097e-02, -1.1643e-01
}
,
{
-2.6309e-02, -2.2238e-02, 6.8700e-03, -1.7973e-02, -1.0893e-02, -1.1888e-02, -4.9598e-03, -6.3663e-06
}
,
{
-1.2406e-03, -2.4901e-12, -9.7265e-07, 6.3490e-03, 1.3495e-01, -3.8411e-03, -6.6630e-03, -7.3614e-03
}
,
{
-2.7729e-03, -4.8174e-03, -6.3012e-03, 2.0491e-01, -2.0110e-03, -3.0974e-03, 5.1407e-01, -3.5016e-08
}
,
{
0.0324, 0.0140, 0.6750, 0.2661, 0.3646, 0.3591, 0.5597, 0.0816
}
};
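// HDNL1kernelsL10 is presumably the final 1x1 convolution of HDNL1:
// 4 * 8 weights that collapse 8 feature channels into the 4 sub-pixels
// of a 2x2 pixel-shuffle output. The index order (sub-pixel-major vs.
// channel-major) is an assumption; see the sketch after the array.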
static __device__ __constant__ const float HDNL1kernelsL10[4 * 8] =
{
0.0882, 0.0422,
0.3775, 0.4754,
-0.3209, -0.4870,
-0.0384, 0.0530,
0.1034, 0.0173,
0.5011, 0.3900,
0.3621, -0.1645,
-0.1304, 0.0013,
0.2230, 0.3026,
0.1618, -0.4514,
-0.2097, 0.1894,
-0.0326, 0.1434,
0.2421, 0.3363,
-0.0938, 0.3156,
0.1137, -0.2165,
0.2273, -0.1284
};
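// --- Illustrative sketch, not part of the original pipeline ---
// A minimal guess at how HDNL1kernelsL10 could be consumed, assuming the
// layout w[p + 4 * c] (sub-pixel index p fastest). The real kernels
// elsewhere in this file may index the array differently.
static __device__ __forceinline__ void HDNL1SketchL10(
    const float feat[8], // 8 feature-channel values at one source pixel
    float out[4])        // resulting 2x2 sub-pixel block, row-major
{
    for (int p = 0; p < 4; p++)
    {
        float acc = 0.0f;
        for (int c = 0; c < 8; c++)
            acc += HDNL1kernelsL10[p + 4 * c] * feat[c];
        out[p] = acc;
    }
}
// HDNL2kernelsL1 below looks like the first layer of the HDNL2 variant:
// 9 * 8 floats, i.e. one 3x3 kernel per output channel over a single
// input channel (again inferred from the shape alone).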
static __device__ __constant__ const float HDNL2kernelsL1[9 * 8] =
{
-2.0676e-02, 6.7641e-03, 2.8287e-01,
2.5576e-01, 1.9765e-01, -2.4700e-01,
3.5056e-01, 2.9306e-01, -2.2245e-01,
8.4706e-02, -2.9455e-01, -5.5831e-02,
-8.4635e-02, -9.6835e-02, 3.1208e-01,
1.7690e-01, 2.7624e-02, 5.1954e-02,
-5.3869e-01, 7.2934e-02, -1.7662e-03,
-3.1402e-02, 3.1700e-01, 1.4965e-01,
3.8569e-02, 5.5025e-03, -6.6555e-03,
-4.2049e-38, -4.1971e-38, -4.1488e-38,
-4.2855e-38, -4.2871e-38, -4.2363e-38,
-4.1861e-38, -4.1974e-38, -4.1677e-38,
1.8451e-01, -5.4584e-02, 1.4494e-01,
1.3433e-01, 1.0073e-01, 2.6371e-01,
6.1261e-02, 2.2116e-01, 2.0074e-01,
5.9669e-02, -3.9168e-02, 2.1674e-01,
-2.9132e-01, 3.0285e-03, 1.2625e-01,
-4.3415e-02, 1.8663e-01, -1.6554e-01,
1.0102e-01, 6.3466e-02, 1.5225e-01,
2.1692e-01, 1.9860e-01, -7.0456e-02,
-1.6406e-03, -2.7834e-01, -3.5449e-01,
-3.0140e-01, -4.2348e-01, -5.8263e-01,
2.3140e-01, -2.6843e-01, -1.1069e-01,
-9.1484e-02, 1.1486e-02, 5.6396e-02
};
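// HDNL2biasL1: presumably one bias per output channel of the first
// HDNL2 layer above (8 channels).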
static __device__ __constant__ const float HDNL2biasL1[8] =
{
-9.0964e-02, 2.1136e-01, -1.2011e-02, -4.5657e-38, -1.4443e-01, 1.8968e-01, -2.9027e-02, 1.6199e-01
};
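// HDNL2kernelsL seems to pack 8 hidden layers, each a 3x3 convolution
// with 8 input and 8 output channels (9 * 8 * 8 floats per layer slice).
// The sketch below shows one plausible way a single slice could be
// applied to a 3x3 neighborhood; the tap/channel ordering and the
// ReLU-style activation are assumptions, not taken from this file.
static __device__ __forceinline__ void HDNL2SketchConv3x3(
    const float* w,       // one 9 * 8 * 8 slice, e.g. HDNL2kernelsL[l]
    const float* bias,    // 8 per-output-channel biases
    const float in[8][9], // in[ic][ky * 3 + kx]: 8 channels, 3x3 taps
    float out[8])
{
    for (int oc = 0; oc < 8; oc++)
    {
        float acc = bias[oc];
        for (int ic = 0; ic < 8; ic++)
            for (int k = 0; k < 9; k++)
                acc += w[k + 9 * (ic + 8 * oc)] * in[ic][k];
        out[oc] = fmaxf(acc, 0.0f); // assumed inter-layer activation
    }
}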
static __device__ __constant__ const float HDNL2kernelsL[8][9 * 8 * 8] =
{
{
4.4561e-02, 4.3527e-01, -8.9737e-02,
-4.9011e-03, 1.4879e-01, -8.2210e-02,
-1.7593e-02, 4.9294e-02, 1.8058e-01,
-3.3827e-02, -7.9055e-02, 2.6982e-01,
-5.2485e-02, -4.2046e-01, -5.6838e-02,
1.0919e-01, -7.3141e-02, 9.4797e-02,
6.2764e-02, 2.5475e-01, 1.3705e-01,
2.0997e-01, 7.3360e-01, 2.0801e-01,
-1.1500e-01, 3.1245e-01, 6.7457e-01,
-5.1481e-39, -5.1520e-39, -4.9367e-39,
-5.1383e-39, -5.1642e-39, -4.9479e-39,
-5.1323e-39, -5.1859e-39, -4.9547e-39,
1.3849e-01, 1.1564e-01, -1.8175e-01,
-5.5355e-03, -1.5117e-01, -2.4654e-01,
8.1590e-03, -1.1681e-01, 3.4700e-05,
-2.5950e-01, -1.4182e-01, 3.1814e-01,
1.7662e-01, 1.8420e-01, -1.5181e-01,
7.6233e-02, -7.8372e-02, -3.1968e-01,
-4.5770e-01, 4.1562e-02, 1.3721e-01,
-5.8444e-02, 3.3148e-02, -2.3370e-01,
1.5374e-01, -1.1162e-01, -7.4099e-03,
-1.5716e-01, -1.8356e-01, 2.1114e-02,
-3.2233e-01, 2.1064e-02, 2.7019e-01,
-1.3702e-01, 2.6969e-01, 2.1033e-01,
8.9027e-02, -7.9969e-02, 1.0096e-01,
6.6773e-02, 3.9558e-02, -7.4944e-02,
-5.9789e-02, 1.2265e-01, 3.3873e-02,
-9.7157e-03, 9.2906e-02, 6.0300e-02,
-2.2104e-03, 6.8198e-02, -1.2931e-01,
8.9288e-02, -1.2554e-01, -4.3270e-02,
1.0660e-01, 1.1609e-02, -1.2415e-01,
2.6372e-02, -3.6311e-02, 1.5625e-01,
-7.9595e-02, -3.3662e-01, -4.0760e-01,
-2.9566e-39, -2.8760e-39, -2.8816e-39,
-2.9566e-39, -2.8964e-39, -2.9115e-39,
-2.9566e-39, -2.9179e-39, -2.9130e-39,
7.9255e-02, 9.4548e-02, 8.8155e-02,
-2.8163e-02, 1.2428e-01, -6.4973e-03,
7.7875e-02, 7.4765e-02, -5.2405e-02,
-1.4886e-02, -7.1499e-02, -7.0719e-02,
9.7562e-02, 9.0948e-02, -5.6588e-02,
-1.2872e-02, -6.6390e-02, -6.4147e-02,
9.8262e-02, -2.4215e-01, -1.7051e-01,
1.8096e-01, 1.8106e-01, 1.3108e-01,
2.0649e-01, 1.2242e-01, 3.7225e-02,
-2.5125e-01, -1.0073e-01, 4.5330e-01,
1.8588e-01, -2.6809e-01, -1.5709e-01,
4.7668e-01, -2.4208e-01, -6.6012e-01,
1.3561e-01, 5.4109e-02, 6.1899e-02,
-1.9605e-02, 1.1349e-01, 3.5781e-02,
3.5513e-03, 3.1212e-02, -6.0399e-02,
5.9258e-02, -1.8175e-02, 7.3714e-02,
2.0052e-02, 4.3245e-02, -5.0879e-03,
-1.1082e-02, -1.0753e-01, -1.7896e-03,
2.9139e-02, 2.2747e-01, -6.4075e-02,
7.3097e-02, 1.5703e-01, -5.3815e-01,
1.0620e-01, -1.1386e-01, 1.7103e-01,
-3.8728e-39, -3.8299e-39, -3.8320e-39,
-3.9065e-39, -3.8445e-39, -3.8135e-39,
-3.8838e-39, -3.8114e-39, -3.8255e-39,
2.3253e-02, 6.9893e-02, 1.4774e-01,
9.6087e-02, 2.3102e-03, -3.4449e-02,
2.6819e-02, 1.0254e-01, -2.8200e-02,
3.9553e-02, 4.7191e-05, -5.5558e-02,
4.1641e-02, 5.8706e-02, -1.0337e-01,
1.1291e-01, 5.9622e-02, 7.0677e-02,
-2.5162e-01, 7.6659e-02, 1.7245e-01,
-5.8522e-02, 1.4365e-01, 2.1189e-01,
-2.8897e-02, -5.7365e-02, 1.4232e-01,
1.7854e-02, 1.7404e-03, -8.7356e-03,
-6.0777e-02, -6.2687e-02, -1.1500e-02,
-1.6468e-01, -2.5058e-01, -1.2798e-01,
2.3193e-02, 1.7209e-01, 1.6687e-01,
-3.4483e-02, -1.6846e-02, 2.5930e-02,
1.4410e-01, 4.2932e-02, -5.0149e-03,
4.7269e-02, 1.1276e-01, -9.2701e-03,
1.5323e-02, 1.3552e-02, 9.0256e-02,
-8.9393e-03, 7.0903e-02, -6.9379e-02,
1.8645e-01, 1.0543e-01, -1.5590e-01,
2.1056e-01, 1.1051e-01, -1.5514e-01,
-7.0484e-02, -1.5153e-01, -5.0873e-01,
3.2730e-39, 3.2358e-39, 3.1222e-39,
3.2642e-39, 3.2358e-39, 3.0921e-39,
3.2730e-39, 3.2358e-39, 3.0899e-39,
1.2225e-02, 1.2386e-01, 6.7712e-02,
3.1263e-02, 1.3617e-01, 1.5352e-01,
2.3405e-02, 8.5466e-02, 8.7303e-02,
-2.0372e-02, 8.3465e-02, -7.4233e-02,
1.2269e-01, 8.4046e-02, -3.6869e-02,
1.0242e-01, 7.3218e-02, -1.1496e-01,
-1.4539e-01, -2.3923e-01, -2.2818e-01,
-3.2368e-02, -7.4360e-02, 2.3493e-02,
1.7004e-01, 6.2924e-02, 8.9327e-02,
-1.1449e-01, -1.4973e-03, -7.0451e-03,
-9.3205e-02, -1.0312e-01, 4.6503e-02,
-2.2148e-01, -1.8111e-01, -1.1992e-01,
9.8140e-02, 9.9823e-02, -2.0282e-02,
-8.1973e-02, 1.4255e-01, -5.2392e-02,
8.0350e-03, -4.8299e-02, -7.7908e-02,
4.2383e-02, 3.0707e-02, 2.8560e-02,
1.0437e-01, 6.1290e-02, -9.7796e-02,
-1.7125e-02, -1.3572e-01, -1.5345e-01,
-1.3292e-01, 2.9477e-02, 6.8032e-02,
1.5741e-01, 4.0258e-01, 2.5838e-01,
1.3948e-01, 3.5713e-01, -3.9825e-01,
-1.9224e-39, -2.4076e-39, -2.4529e-39,
-1.9181e-39, -1.9894e-39, -4.0240e-39,
-1.9335e-39, -2.3920e-39, -4.0147e-39,
-2.1714e-02, -3.5299e-02, -7.5803e-03,
-2.4087e-02, 7.5265e-02, 7.6697e-02,
4.5309e-02, 8.9529e-02, 7.6510e-03,
1.0813e-02, 3.1294e-02, -2.5907e-02,
1.1962e-02, -6.8664e-03, -1.4084e-01,
7.7013e-02, -1.2305e-01, -6.7800e-02,
-9.7392e-02, 4.4082e-02, 1.4473e-01,
4.9436e-02, 2.8859e-01, 2.8252e-01,
-3.5828e-02, -7.5616e-02, 2.4875e-01,
-6.7684e-02, 1.1290e-01, 4.2827e-02,
-1.0860e-01, 1.2952e-01, 5.9784e-01,
-3.5402e-01, -3.9558e-02, -6.0775e-01,
-1.2854e-02, 1.5240e-01, 1.4115e-01,
-2.8134e-02, -1.2939e-02, -2.6203e-02,
1.1300e-01, 1.4481e-01, -5.1454e-02,
1.2688e-01, 2.8536e-02, 9.4877e-02,
9.6033e-02, -1.3901e-02, 6.0035e-02,
-1.1249e-01, 4.3971e-02, -1.0918e-01,
8.2500e-02, 2.1413e-01, 3.9015e-02,
1.8361e-01, 2.5271e-01, -2.2794e-01,
-8.1195e-02, -1.2269e-01, -2.6097e-01,
7.6827e-39, 7.7882e-39, 7.6893e-39,
7.7006e-39, 7.7857e-39, 7.7384e-39,
7.6985e-39, 7.7712e-39, 7.7399e-39,
1.4458e-02, 1.0801e-01, 1.5906e-01,
-1.4676e-02, 1.3699e-01, 9.2460e-02,
-3.6479e-02, 1.4529e-01, -2.8681e-02,
-3.3251e-02, -7.3096e-02, -1.4330e-01,
5.7009e-02, -3.1905e-02, -1.2035e-01,
1.1838e-01, 5.7011e-02, 2.0800e-02,
-1.1567e-02, -2.2125e-01, -9.3953e-02,
-7.5378e-02, -1.2069e-01, 1.3217e-01,
-7.7357e-02, -1.3171e-01, 1.2776e-01,
-1.1397e-01, -3.5183e-02, 2.2994e-02,
-6.5101e-02, -1.5019e-01, -2.7451e-02,
-2.4260e-01, -1.3543e-01, -1.9889e-02,
-1.9798e-39, -3.5282e-40, -1.9216e-39,
-1.9140e-39, -1.9370e-39, -1.9943e-39,
-1.8623e-39, -1.8665e-39, -1.9320e-39,
-4.8850e-39, -5.0283e-39, -4.9987e-39,
-5.0868e-39, -5.0814e-39, -5.0779e-39,
-5.2489e-39, -5.1086e-39, -5.1234e-39,
-2.9120e-39, -3.0278e-39, -2.9633e-39,
1.3186e-39, 6.0555e-39, 6.0419e-39,
-5.5922e-39, -8.5992e-40, -2.8529e-39,
-3.4668e-39, -3.5127e-39, -3.4668e-39,
-3.2831e-39, -3.4668e-39, -3.6734e-39,
-3.2142e-39, -3.2831e-39, -3.5816e-39,
1.3445e-39, 1.3621e-39, 1.3375e-39,
1.4539e-39, -2.2695e-40, 1.4522e-39,
1.3563e-39, 1.3339e-39, 1.3001e-39,
-4.4670e-39, -4.4026e-39, -4.3159e-39,
-4.5047e-39, -4.3505e-39, -2.7259e-39,
-4.5265e-39, -4.4721e-39, -4.4990e-39,
-1.9864e-39, -4.1379e-39, -3.7189e-39,
5.2465e-39, 2.5220e-39, 1.5639e-39,
-3.9760e-39, -5.7033e-39, -4.0978e-39,
-6.3745e-40, -4.7511e-39, 2.3456e-39,
-1.5164e-39, 5.0431e-39, 5.1197e-39,
8.7052e-40, 1.4947e-39, -1.1546e-39,
5.3140e-02, 1.0281e-01, 1.4767e-01,
-6.1530e-02, -9.4166e-02, 4.8671e-02,
5.6787e-03, -1.4551e-01, 1.5614e-02,
-3.4826e-02, -5.1148e-02, 9.7079e-02,
-1.3603e-02, -1.2249e-01, -1.9330e-02,
-6.8184e-02, -1.4344e-01, -9.4023e-03,
-7.4629e-02, 3.9634e-02, 1.3445e-01,
4.2153e-02, 7.1129e-01, 2.8703e-02,
7.8247e-02, 7.2210e-01, -6.6198e-01,
-6.1010e-39, -6.2892e-39, -6.4008e-39,
-6.0825e-39, -6.3221e-39, -6.3883e-39,
-1.4962e-39, -1.1702e-39, -1.2143e-39,
5.5512e-02, -2.1522e-02, 1.0866e-01,
-9.2812e-02, -3.5119e-02, 1.1396e-01,
-1.3922e-01, 6.7287e-02, -5.5626e-02,
-2.0492e-01, 8.1441e-02, -1.3513e-01,
4.7447e-02, 2.0081e-01, -3.1249e-01,
-1.8546e-02, 2.0680e-01, 7.3979e-02,
8.8928e-02, -4.3606e-01, -8.4823e-02,
-5.6133e-02, 3.5132e-01, 1.8633e-01,
-4.3855e-03, 5.4869e-02, 1.1658e-01,
1.7423e-01, -5.3107e-02, 2.2925e-02,
-1.7622e-01, 4.4453e-02, 2.8131e-02,
2.6863e-01, -2.9085e-01, -1.5098e-01
}
,
{
-2.4230e-40, 5.4425e-39, 3.4517e-39,
-1.9803e-39, -1.5207e-39, -3.5630e-39,
-4.9409e-39, -2.9280e-39, 7.7966e-40,
2.4867e-39, -2.1848e-39, 3.2524e-39,
-6.2860e-39, 4.0411e-39, -3.6956e-39,
-3.3384e-39, -1.0908e-39, 5.4261e-39,
-3.6691e-40, 9.4949e-40, -1.7279e-39,
-1.0644e-39, -2.1371e-39, -2.5125e-39,
2.9368e-39, -5.3820e-39, -3.9771e-40,
-1.4703e-39, -3.6960e-39, -4.4161e-39,
8.2800e-40, -4.9175e-39, 3.1868e-39,
5.5703e-39, -3.0263e-39, -1.6991e-39,
5.2691e-39, 4.8127e-39, 4.1346e-39,
-1.3013e-39, -1.7101e-39, -3.5467e-39,
1.1496e-39, 2.0938e-39, -4.2970e-39,
-5.5314e-39, 6.4852e-40, -5.0870e-39,
3.9377e-39, -4.1683e-39, -3.5404e-40,
-3.6188e-39, 5.4657e-39, 2.1279e-39,
3.4090e-40, 2.4425e-40, 9.3423e-41,
-2.3450e-39, 3.1518e-40, 4.3061e-40,
-2.6175e-39, -2.4696e-39, -2.3755e-39,
2.2764e-39, -4.4934e-39, 8.5722e-40,
5.1798e-39, 2.7072e-39, 5.3750e-39,
5.4335e-40, 3.8556e-39, -3.4799e-39,
-4.8963e-39, -1.1413e-39, -5.3918e-40,
6.1843e-39, -1.8521e-39, -1.3450e-39,
-2.0906e-39, -3.2544e-39, -2.8205e-39,
5.3550e-39, -3.0202e-39, -3.4181e-39,
-3.0043e-39, -3.2900e-39, -3.2915e-39,
6.1849e-39, -3.3421e-39, -3.3995e-39,
-4.8657e-39, -4.7034e-39, -4.7467e-39,
-4.6555e-39, -4.6045e-39, -4.6954e-39,
-4.8886e-39, -4.7333e-39, -4.7805e-39,
-2.0900e-39, -1.9429e-39, -2.0572e-39,
-2.0270e-39, -1.9074e-39, -1.9275e-39,
-2.1243e-39, -2.1134e-39, -2.1539e-39,
-4.4175e-39, -4.6412e-39, -4.6582e-39,
-4.6364e-39, -4.8757e-39, -4.6795e-39,
-4.4571e-39, -4.5038e-39, -4.4570e-39,
-3.2662e-39, -3.1163e-39, -3.2050e-39,
-3.2098e-39, -3.0887e-39, -3.1635e-39,
-3.3183e-39, -3.1411e-39, -3.2824e-39,
8.6839e-40, 5.7318e-39, 1.8373e-40,
4.6732e-39, -4.5549e-41, 1.2817e-39,
3.7642e-41, -6.2591e-39, -5.0492e-39,
5.0057e-39, 6.0612e-39, 2.0220e-39,
3.7436e-39, 4.8326e-39, 3.1353e-39,
3.5289e-39, 4.7177e-39, 6.2666e-39,
-1.4963e-01, -8.0360e-02, -7.9054e-02,
-1.3731e-01, 5.0766e-02, 6.9673e-02,
3.2213e-02, 3.3250e-02, 1.3170e-01,
-2.9718e-02, -2.6931e-02, 1.5768e-02,
5.9232e-02, 7.8471e-02, 9.9465e-02,
2.4872e-02, -4.4226e-02, 3.2357e-02,
-6.0139e-02, -2.2756e-02, -5.5412e-02,
4.5363e-02, 1.6393e-01, 3.7428e-02,
5.2497e-02, 9.5435e-02, 9.7155e-02,
8.2849e-02, 5.9711e-02, 1.4352e-01,
1.1756e-02, 1.5440e-02, 1.3039e-01,
4.3324e-03, 5.9119e-02, 1.1129e-01,
-3.9591e-03, 5.8617e-02, -1.3843e-02,
-2.9949e-02, 3.4877e-02, 5.0679e-03,
3.7278e-02, -2.5221e-02, 1.2191e-01,
1.5626e-01, 8.9797e-02, -1.5458e-02,
1.5607e-01, 1.4561e-02, 1.1720e-01,
-1.6112e-02, 7.7908e-02, -6.1322e-02,
3.8589e-39, 3.9262e-39, 3.8641e-39,
3.9450e-39, 3.8805e-39, 3.9383e-39,
3.8384e-39, 3.8027e-39, 3.7700e-39,
6.2294e-02, -5.6804e-03, -4.7293e-01,
1.3161e-01, 3.1187e-01, -1.8013e-01,
4.9908e-02, 9.8583e-02, 3.8863e-02,
-1.7400e-39, 3.5779e-39, 5.2800e-39,
-1.6845e-39, 4.7140e-39, 2.4244e-39,
-1.3654e-39, 2.4123e-40, -1.5360e-39,
-1.0409e-39, 1.8590e-39, -5.2161e-41,
-8.5110e-40, -1.7210e-39, -4.6624e-39,
5.0754e-40, -2.6248e-39, -5.4801e-39,
-4.9486e-39, 2.8984e-39, 4.9357e-39,
-1.4077e-39, 3.8778e-39, 5.8202e-39,
-4.1095e-39, 6.8891e-40, 5.6565e-39,
3.8021e-39, -5.4740e-41, 2.1795e-39,
-2.4185e-39, -5.8101e-39, 1.5651e-39,
-4.9775e-39, 6.0152e-39, -5.2337e-39,
-4.4350e-39, -3.8239e-39, 3.1624e-40,
-4.3665e-39, -3.0919e-39, -4.7675e-39,
-2.3335e-39, 1.8270e-39, -5.5077e-39,
5.5906e-39, 6.7732e-41, 3.7359e-39,
-5.1412e-40, -2.3239e-39, 5.1937e-39,
-4.4951e-39, -3.4928e-40, -5.0589e-39,
4.9149e-39, 1.1372e-39, 6.6368e-40,
-1.8870e-40, -5.9117e-40, -1.3973e-39,
-2.3555e-39, -1.0637e-39, 3.1692e-39,
-4.8054e-39, 4.8090e-40, 2.0873e-39,
3.8301e-39, -3.8642e-39, 4.8187e-39,
-1.6563e-39, 8.9890e-40, -3.5162e-39,
-2.3010e-01, -7.4445e-02, -1.0006e-01,
-2.4543e-01, -8.5750e-02, 1.4859e-01,
-1.3783e-01, 1.2709e-01, 2.5012e-01,
1.0310e-01, -2.3520e-02, -8.1277e-02,
-2.9267e-02, 1.0686e-01, 4.6287e-02,
-1.2342e-02, -1.7104e-02, 8.4357e-02,
-1.8492e-02, -2.0711e-02, -3.5242e-02,
7.6163e-02, 6.0853e-02, 9.4248e-02,
6.2008e-02, 1.1373e-02, 2.6609e-02,
-7.8135e-02, 1.0672e-01, -5.8380e-02,
7.1618e-02, 2.7966e-04, 1.1835e-01,
1.1306e-01, -7.8578e-03, 5.1743e-03,
-1.2123e-01, 4.9640e-02, 7.3827e-02,
-1.0377e-01, -3.7377e-02, -3.6536e-02,
5.7489e-02, -4.6279e-04, 9.0068e-02,
4.0784e-05, -3.3328e-02, 5.1191e-02,
9.6538e-02, 7.1779e-02, 1.2121e-01,
1.1598e-01, -5.9055e-02, 8.2671e-02,
-1.7292e-39, -1.7848e-39, -1.7308e-39,
-3.2817e-39, -1.7274e-39, -3.3601e-39,
-1.7252e-39, -3.4067e-39, -1.7783e-39,
-7.4053e-02, -4.2785e-01, -4.7597e-01,
4.6309e-01, 7.6018e-02, -3.5885e-01,
3.0428e-01, 8.7449e-02, 9.7880e-02,
-3.4191e-02, 1.1834e-01, -4.3273e-02,
-6.0782e-01, 9.2387e-01, -1.3972e-01,
3.0665e-01, 4.7445e-01, 4.8683e-02,
-1.8865e-02, 9.9509e-02, -4.9881e-02,
2.1640e-02, -2.0941e-01, -1.4779e-01,
1.7808e-01, -1.2572e-01, -9.6756e-02,
-1.0143e-01, 8.3153e-02, -1.0478e-01,
1.6201e-01, 2.0740e-01, -1.2653e-01,
8.1654e-02, -7.6224e-02, -8.9864e-02,
4.5383e-02, -3.6893e-02, -1.0096e-01,
2.0389e-01, 2.2557e-01, -1.9685e-01,
-9.5198e-02, 2.2877e-01, 2.1135e-02,
-1.0919e-01, -1.7563e-01, -3.5255e-01,
-1.3447e-01, 3.3709e-01, -1.9043e-01,
-2.1422e-01, -2.8848e-01, -5.3921e-02,
5.5351e-02, -5.0579e-02, -1.6168e-01,
2.5282e-01, 1.9715e-01, -2.4035e-01,
-3.0800e-02, 1.9329e-01, -1.0893e-01,
-3.4416e-39, -1.8080e-39, -1.6625e-39,
-1.6612e-39, -1.7397e-39, -1.5953e-39,
5.3047e-39, 5.4221e-39, -1.1665e-39,
2.1838e-02, -7.0635e-02, 3.6095e-01,
5.1096e-01, 6.3838e-01, 5.0716e-01,
1.1642e-01, 1.8546e-01, 1.5989e-01,
1.0799e-01, 2.8380e-01, 1.4910e-01,
-2.4305e-01, 2.3084e-01, -9.9982e-02,
-4.6839e-01, 6.0376e-01, -1.2748e-02,
8.7608e-02, 9.8828e-02, 2.1469e-02,
-3.5384e-03, -1.5689e-01, -1.1411e-01,
2.0728e-02, 5.6814e-02, -1.1090e-02,
-3.9301e-02, -9.4325e-02, -6.2119e-02,
1.2842e-01, 9.7466e-02, -2.7502e-02,
1.6560e-01, 1.5058e-01, 2.2821e-02,
-8.1287e-02, -6.3940e-03, 3.2162e-02,
9.4116e-02, -6.2567e-02, -1.2704e-01,
5.4654e-02, 1.4885e-02, 3.8166e-03,
1.9830e-01, -2.5419e-01, -6.7067e-02,
3.2303e-01, 1.6037e-01, -3.0200e-02,
1.3011e-01, 7.5455e-02, -1.2726e-02,
-1.9198e-01, -1.5419e-01, -7.5420e-02,
1.6070e-01, -6.1031e-02, -2.0179e-01,
-1.5829e-02, 1.9918e-01, 1.0960e-01,
-5.5215e-39, -5.8659e-39, -5.5573e-39,
-6.2394e-39, -6.0172e-39, -6.0159e-39,
-4.0308e-39, -4.1217e-39, -4.1372e-39,
1.6143e-01, 1.7271e-01, 4.3534e-01,
-2.4312e-01, 4.0146e-01, 4.4693e-01,
1.5442e-01, 3.9885e-01, -1.4357e-01,
-6.0236e-02, -1.2324e-01, 6.1197e-02,
-2.5842e-02, -1.0266e-02, 1.5670e-03,
2.9103e-02, 2.9966e-02, 1.1286e-01,
3.4528e-02, 1.3039e-01, 9.2736e-02,
3.5193e-02, 5.6583e-02, 5.9465e-02,
1.2846e-01, 9.3387e-02, 9.2131e-02,
1.4974e-03, 1.0196e-01, 6.7632e-02,
8.9809e-02, 5.7568e-02, -6.0621e-02,
-2.7582e-03, 3.1935e-02, 3.1299e-02,
1.3595e-01, 4.9498e-02, 1.2535e-01,
-3.9396e-02, 4.8859e-02, 4.1389e-02,
3.7026e-02, 1.3667e-01, 7.5657e-03,
-5.3476e-02, 1.9677e-02, 9.5214e-02,
1.3136e-02, 7.5560e-02, 6.2428e-03,
-5.2378e-02, -1.8704e-02, 1.0657e-01,
-4.2938e-02, -5.0199e-02, 1.4357e-01,
-5.7002e-02, 1.4158e-01, 4.9442e-02,
-6.8383e-02, 1.1316e-01, 5.2071e-02,
1.5031e-40, 2.1250e-40, 1.8673e-40,
1.5681e-40, 1.3104e-40, 1.6173e-40,
2.1560e-40, 1.8582e-40, 1.7747e-40,
8.4848e-02, -1.9845e-01, -5.1844e-01,
3.0959e-01, 3.6682e-01, 3.1208e-02,
1.9871e-01, 2.8318e-01, 1.6066e-01
}
,
{
-2.7283e-39, -4.9031e-39, -2.1039e-39,
-1.0327e-39, -5.1679e-39, -4.3300e-39,
-5.2613e-39, -3.1707e-39, -6.0916e-39,
1.5840e-39, 1.6709e-39, 1.6120e-39,
1.6716e-39, 1.7418e-39, 1.6624e-39,
1.5922e-39, 1.7383e-39, 1.5668e-39,
1.1389e-01, -4.5774e-02, 6.1423e-02,
1.3858e-01, 2.3102e-02, -6.5079e-02,
1.3269e-01, 3.2387e-02, 7.6966e-02,
-2.1531e-39, -1.6063e-39, -3.2070e-39,
-2.8531e-39, 4.6956e-39, 1.4038e-39,
2.0509e-39, -4.4924e-39, -5.3658e-39,
1.1524e-01, -5.0115e-02, 9.4187e-02,
4.2477e-02, 1.4197e-01, 2.4986e-02,
-2.8688e-02, 9.2289e-02, 4.1965e-02,
-2.1691e-01, -6.6916e-04, -1.3026e-01,
-1.9143e-01, 1.2211e-01, 1.2562e-01,
-1.2273e-01, 7.1045e-02, 1.2396e-01,
-8.0861e-02, -4.4301e-03, 6.3144e-03,
3.0338e-02, -8.6463e-03, 5.5084e-02,
-1.8370e-01, -5.0287e-02, -7.2194e-02,
7.4570e-02, 5.4483e-02, -1.2639e-02,
1.2481e-01, 1.4683e-01, -4.7581e-02,
1.6748e-01, -3.1374e-02, -1.7271e-02,
1.9801e-39, -3.3469e-39, -4.7012e-39,
-2.9869e-39, -3.2752e-39, -2.2142e-39,
-4.2927e-39, -1.9635e-39, -8.7517e-40,
2.7286e-39, 2.7755e-39, 2.7501e-39,
2.7114e-39, 2.7711e-39, 2.6858e-39,
2.5562e-39, 2.6523e-39, 2.5846e-39,
1.4015e-01, 1.0486e-01, 1.2320e-01,
4.6545e-02, 1.2068e-01, 9.2531e-02,
1.0717e-01, 3.8738e-02, 1.0181e-01,
-7.4503e-40, -1.1490e-39, 6.1230e-41,
2.4896e-39, 5.3740e-39, -1.4060e-39,
1.9095e-39, -7.1020e-40, 3.5820e-39,
-1.4348e-02, 6.4128e-02, 6.1082e-02,
-1.1112e-02, 8.5993e-02, 2.4835e-02,
1.2794e-01, -9.1072e-02, -1.3487e-02,
-5.8057e-02, 1.3080e-01, 1.0895e-01,
-1.6436e-01, 9.8593e-03, 1.5586e-02,
-1.5336e-01, 3.6391e-02, 1.4539e-01,
-4.6112e-02, 3.0102e-02, 6.2460e-02,
-2.5510e-02, 2.0437e-02, -5.6816e-02,
-1.0308e-01, -1.5284e-01, -7.1036e-02,
5.5290e-02, -6.6632e-02, 4.2268e-02,
-2.7665e-02, 9.3415e-02, 5.1026e-02,
1.5652e-01, 1.0835e-01, 9.6131e-02,
-4.2583e-39, -3.4889e-39, -5.7522e-39,
4.2701e-40, 2.8095e-39, -3.5579e-39,
2.2286e-39, 4.9865e-39, 4.0469e-39,
-6.4320e-40, -3.3384e-39, -5.9025e-39,
-7.9075e-40, -3.0577e-39, -6.0007e-39,
-8.9627e-40, -2.8374e-39, -5.8866e-39,
6.3645e-03, -5.3080e-03, -5.1759e-02,
1.0665e-01, -6.3126e-02, 5.0918e-02,
7.2193e-02, -6.8836e-02, -6.5657e-02,
2.8519e-39, -5.0955e-39, -9.6085e-40,
-3.3563e-39, -5.6038e-39, -1.6256e-39,
2.6872e-39, 1.4728e-39, -1.9908e-39,
-1.5254e-02, 9.8323e-02, 4.5504e-02,
1.3855e-01, 6.9300e-02, 1.9135e-01,
-5.2321e-02, -6.0227e-03, -1.1734e-04,
-1.4457e-01, 9.2761e-02, 4.5219e-02,
-3.0361e-01, 3.4673e-01, -2.3110e-01,
2.1017e-01, 2.4983e-01, 3.1659e-01,
-6.0569e-02, -5.4348e-02, -7.6719e-02,
-6.5060e-02, 2.8902e-01, 8.0732e-02,
-3.3425e-01, -3.1361e-01, -2.7183e-01,
2.8035e-02, -5.8134e-02, -4.3880e-02,
-1.6375e-02, 9.8195e-02, -7.4011e-02,
-5.9523e-02, 1.0234e-01, -5.3357e-02,
2.3364e-39, -2.5324e-39, -4.8333e-40,
2.2903e-41, -3.3061e-39, -2.5779e-39,
-1.8164e-39, -4.9236e-39, -4.9272e-39,
-1.2809e-39, -1.1698e-39, -1.2564e-39,
-1.3111e-39, -1.1778e-39, -1.2543e-39,
-1.4772e-39, -1.4021e-39, -1.4721e-39,
8.8919e-02, -3.4541e-03, -4.9619e-02,
1.0997e-01, 1.0257e-01, 6.9950e-02,
9.2624e-02, 3.2712e-02, 8.7916e-02,
-5.0242e-39, -6.1320e-39, 8.7891e-40,
-4.9951e-39, 2.3873e-39, -2.7823e-39,
-3.6739e-39, -1.8903e-39, 5.2150e-39,
9.6288e-02, 9.7568e-03, -5.8178e-02,
2.3313e-02, 1.1725e-01, 1.0291e-01,
-1.0111e-01, 8.3706e-02, 9.6575e-03,
-8.2531e-02, 7.0089e-02, 1.0821e-01,
-1.1016e-01, 1.8977e-01, 2.5576e-01,
-1.0221e-01, 5.9236e-02, 6.1678e-02,
2.6234e-02, 9.6868e-02, 9.2432e-02,
4.9881e-02, 5.9121e-02, -1.0477e-02,
-1.4693e-01, -1.0030e-01, -1.0608e-01,
1.1936e-01, -2.2301e-02, 1.1363e-01,
1.3981e-01, 6.7734e-02, -8.2775e-02,
1.0404e-01, -7.7360e-03, 4.2523e-02,
-2.6052e-39, 5.7201e-39, -5.6049e-39,
-3.6314e-39, -5.9232e-39, -3.6970e-39,
3.4360e-39, -5.6848e-39, -3.8308e-39,
4.6279e-39, 5.8135e-39, 2.0652e-39,
3.9864e-39, 4.4000e-39, 5.5163e-39,
2.9644e-39, 2.7537e-39, 3.6593e-39,
4.7872e-02, -2.5857e-02, 4.8810e-02,
1.0389e-01, -1.0782e-01, 4.1365e-02,
9.5778e-02, -5.2341e-02, 4.5947e-02,
-8.2652e-40, -5.7602e-39, 4.6187e-39,
-2.8365e-39, 1.4981e-39, 6.2504e-39,
-4.8330e-39, 4.0283e-39, 4.9792e-39,
-1.0893e-03, -8.2708e-02, -1.7925e-01,
8.3461e-02, 3.1339e-02, 8.8096e-02,
7.3139e-02, -1.2212e-01, 1.0489e-02,
-2.4187e-01, -3.8397e-01, 1.3730e-01,
1.9217e-01, 1.4101e-01, 4.9795e-01,
-1.1441e-01, 3.3343e-01, 7.9194e-02,
1.4556e-01, -5.1060e-01, 2.1556e-01,
3.5719e-01, 2.7282e-01, -1.9015e-01,
-1.0941e-01, 2.7634e-02, 1.1833e-01,
-9.3316e-02, -4.1307e-03, 7.8613e-02,
-2.1526e-02, -6.7141e-02, 2.5513e-02,
-3.3942e-02, -8.6282e-02, 3.0446e-02,
-4.5124e-39, -2.7154e-39, 4.9467e-39,
-4.2299e-39, -5.9485e-39, -2.9606e-39,
-4.7642e-39, -4.7981e-39, -4.0169e-39,
-3.8238e-39, 5.7381e-39, 4.0097e-39,
1.9550e-39, 4.5523e-39, 3.1206e-39,
6.0200e-39, 3.0406e-39, 2.0498e-39,
-3.2474e-01, 1.1052e-02, 4.7197e-02,
-1.4658e-01, 1.6728e-01, 5.2190e-02,
4.3174e-02, 4.5864e-02, 5.4472e-02,
2.6403e-39, 2.7421e-39, -4.3011e-39,
-3.6258e-39, -1.3708e-39, 3.6147e-39,
-1.9471e-39, 4.5896e-39, 4.5992e-39,
-9.9986e-02, 7.0727e-02, 8.5023e-02,
2.2501e-02, 1.4343e-01, 1.1878e-01,
2.8126e-02, 7.3239e-02, 1.0468e-02,
4.5032e-01, 4.4730e-01, 1.3446e-01,
-1.3374e-01, 8.8554e-02, 3.5610e-01,
3.0584e-01, 2.3536e-01, 1.6161e-01,
-5.1485e-01, 1.2372e-01, 5.4379e-02,
-2.9665e-01, -3.3157e-02, -1.8688e-01,
5.1777e-02, -1.4315e-01, -1.1366e-01,
-2.4471e-01, 5.5554e-02, 8.9284e-02,
-1.6870e-01, 7.6156e-02, 1.2472e-01,
-1.5633e-01, 4.3184e-03, 1.1078e-01,
4.0579e-39, -3.8271e-39, 1.1535e-39,
6.6968e-40, -1.1545e-39, -5.4217e-40,
3.5566e-39, -4.4956e-40, -1.7097e-39,
-4.1778e-39, -3.7655e-39, -3.7148e-39,
-3.8013e-39, -3.5225e-39, -3.4678e-39,
-3.8369e-39, -3.5583e-39, -3.6518e-39,
-1.4894e-02, 2.4801e-03, -4.6996e-02,
6.7453e-04, 1.8799e-02, 2.9889e-02,
7.2700e-03, 1.2385e-01, 9.2522e-02,
3.9300e-39, 3.1853e-39, 2.8376e-39,
2.8888e-39, -4.8734e-39, 2.3402e-39,
-3.9710e-39, -4.3243e-39, 4.1151e-39,
1.6399e-02, -8.2828e-02, -5.8361e-02,
2.1315e-02, 1.1968e-02, 6.8727e-02,
3.8558e-02, 1.5451e-02, 5.4465e-04,
1.0549e-02, -8.6468e-02, -1.8535e-01,
-1.3616e-01, 2.7371e-01, 1.1157e-01,
-1.7097e-01, 1.3659e-01, 2.2831e-02,
-3.3897e-02, 1.3307e-01, 7.4482e-03,
4.8120e-01, 7.7053e-01, 5.3354e-01,
-2.4277e-01, -5.9136e-02, -1.3419e-01,
-7.4653e-02, -6.4169e-02, -2.9526e-02,
-3.6336e-02, 7.2362e-02, -3.5332e-02,
6.2628e-02, 6.2278e-02, 3.5639e-02,
3.6614e-39, -2.6150e-39, -3.5229e-39,
5.3538e-39, -1.2368e-39, 2.1530e-39,
4.8585e-39, -2.4150e-39, 5.2220e-40,
3.8610e-40, 1.4772e-39, 2.1962e-39,
-1.8493e-40, 1.1409e-39, 1.7309e-39,
-2.5751e-40, 9.1351e-40, 1.3106e-39,
6.2867e-02, -1.2727e-01, -6.5307e-02,
1.1415e-01, -4.5529e-02, -1.1358e-01,
4.3427e-02, -6.0994e-02, -7.7808e-02,
-4.1831e-39, 1.3230e-39, 5.5853e-39,
-3.4646e-39, -7.2824e-40, -3.4263e-39,
1.5344e-39, -5.8245e-39, 1.9910e-39,
1.1000e-02, -3.7088e-03, -8.0042e-02,
9.7603e-02, 8.6581e-02, -1.8921e-03,
2.2820e-01, 6.8073e-02, -8.1081e-02,
-3.3901e-01, -1.1231e-01, -8.6476e-02,
1.1147e-01, 4.9587e-01, -1.7039e-01,
-2.0702e-01, 5.8730e-02, -1.3475e-01,
2.3548e-01, -6.8044e-02, 9.4296e-02,
4.4803e-01, 6.1517e-03, -5.5192e-02,
-2.7304e-01, -2.6003e-02, 4.0713e-01,
2.8621e-02, 6.2698e-03, -1.4746e-01,
9.4819e-02, -1.3109e-02, 3.5540e-02,
4.4047e-02, 3.5066e-02, -9.5886e-03
}
,
{
-6.7011e-03, 1.7398e-01, 1.4767e-01,
-1.9882e-02, 1.9286e-01, 4.8626e-02,
1.1465e-01, -4.4017e-02, -1.9288e-01,
-7.5817e-02, 1.5598e-01, 1.2329e-01,
3.4126e-03, -9.4884e-02, -4.2276e-02,
3.9110e-02, -1.3477e-01, -4.4951e-02,
6.0450e-02, 4.4656e-01, 3.8954e-01,
-2.1207e-01, -1.0600e-02, -5.6351e-01,
1.8074e-01, 3.0797e-02, -4.0380e-01,
-1.0733e-01, 3.7228e-02, 9.7157e-02,
-7.5810e-03, 5.5605e-02, -9.1898e-02,
-1.4992e-01, -5.3206e-02, -1.9667e-01,
-1.6667e-01, 7.6091e-02, 1.7064e-01,
2.5322e-01, -9.4636e-03, -2.7899e-01,
4.2013e-02, 1.5693e-01, 3.1124e-01,
-2.1534e-02, 1.3915e-01, -2.8199e-01,
-2.9683e-03, 1.4445e-02, -1.5552e-01,
3.4759e-02, -2.0321e-01, -1.1155e-01,
3.6164e-02, 2.8664e-01, 2.3426e-01,
-1.2525e-01, -1.7195e-01, -5.2270e-02,
3.8782e-02, 5.7734e-02, 2.1945e-01,
1.0243e-01, -1.3159e-01, -1.7844e-01,
-6.0359e-02, 1.9125e-01, 3.3553e-01,
-1.0876e-01, -1.2149e-01, -5.7185e-01,
-2.0583e-02, -4.8168e-03, -7.1908e-02,
-2.3428e-02, 2.9902e-02, 1.0888e-02,
3.6383e-02, 1.0052e-01, 2.8972e-02,
1.1415e-03, -3.4518e-02, -9.0058e-02,
7.3207e-03, 6.0961e-02, 7.5629e-02,
-4.5969e-02, 2.4314e-02, 6.7658e-02,
-1.3043e-01, -3.0343e-01, -2.0799e-01,
-4.6261e-02, -1.7650e-02, -7.2160e-02,
-2.6291e-02, 1.5707e-01, 9.5021e-02,
-4.1030e-02, -8.1977e-02, -3.0776e-02,
-3.0685e-02, 8.2163e-03, 4.0357e-02,
-6.9633e-02, 6.0690e-02, 1.5418e-02,
-1.2814e-01, 7.3968e-02, -3.3742e-03,
-1.5239e-01, 8.9941e-03, 1.7877e-01,
2.1219e-01, -5.2057e-01, -2.2284e-01,
-3.4681e-02, -1.3594e-02, 1.6700e-01,
-7.7366e-02, 8.5138e-03, -4.3159e-02,
4.0597e-02, 9.7247e-04, -3.4326e-01,
-2.1424e-01, -1.6489e-01, -4.3248e-02,
1.5987e-01, 4.6235e-01, 2.6287e-01,
-1.2270e-02, 1.3165e-01, 5.3217e-02,
7.2716e-02, -7.0677e-02, -1.7740e-01,
-6.2357e-02, 1.1932e-01, 1.5733e-01,
-1.0275e-01, 1.4966e-01, 4.8125e-02,
-4.7150e-02, 1.5516e-01, 6.9615e-02,
6.1252e-02, 5.3859e-02, 1.7052e-01,
3.1940e-02, 1.1842e-01, 4.2265e-02,
-4.9531e-02, 1.1519e-01, 9.8914e-02,
1.3455e-01, 1.3177e-01, -2.7938e-03,
1.1895e-01, 1.1377e-01, 6.1035e-02,
8.0390e-02, -4.1028e-02, 3.7415e-03,
-1.0317e-01, 1.0279e-01, -6.5789e-03,
-2.3339e-02, 7.2741e-02, 4.1662e-02,
-7.4087e-02, 8.8531e-02, -4.9697e-02,
4.6134e-02, 1.4300e-01, 1.1720e-01,
3.8271e-03, 1.7108e-01, -2.4779e-02,
6.9844e-02, -4.6467e-02, -9.1699e-02,
5.5704e-02, -3.0312e-02, -7.8252e-03,
-4.3799e-02, -1.6623e-01, -2.3006e-02,
4.9214e-02, 3.1528e-02, 3.3302e-02,
3.1213e-02, 9.8880e-02, -1.1098e-01,
4.5092e-02, -1.6922e-03, -5.1380e-02,
7.6063e-02, 1.4159e-01, 4.1409e-02,
8.0812e-02, 9.7569e-02, 4.1532e-02,
-1.1136e-01, -4.3686e-02, -1.4144e-01,
-9.7717e-02, 4.8239e-02, 5.3374e-02,
-1.1827e-01, 1.0008e-01, 8.6368e-02,
-6.2572e-02, 3.6484e-02, -6.3361e-02,
4.1008e-03, 1.6709e-02, 4.0553e-02,
2.2766e-02, 2.7241e-02, 5.1786e-02,
1.3607e-02, 5.4638e-02, 6.9439e-02,
-2.4211e-02, 4.0065e-03, -1.9540e-03,
-9.5697e-03, 3.0503e-02, 3.5809e-02,
-4.3456e-02, 2.8959e-02, 4.2898e-02,
-1.5629e-02, -9.4347e-02, 7.2799e-02,
2.3115e-01, 7.3449e-02, 6.9354e-02,
1.6014e-01, 1.8878e-01, -2.2148e-02,
-4.9274e-02, -6.9233e-03, 1.0578e-02,
-4.3291e-02, -7.8361e-03, 1.6647e-02,
-5.6168e-02, 1.0317e-02, 3.1170e-02,
1.2530e-01, -3.2398e-02, -6.5690e-02,
-2.5805e-01, 3.6079e-02, 3.5390e-02,
-1.7236e-01, 6.6798e-03, 4.8924e-02,
1.3314e-01, 5.0646e-02, -3.4844e-02,
-1.2559e-01, -1.1774e-01, 1.2898e-01,
-7.7402e-02, -1.0703e-02, -2.6359e-01,
-3.8706e-02, -2.2082e-02, 2.7591e-03,
-8.2353e-02, -3.1941e-02, -1.1937e-01,
2.9747e-02, 2.0041e-01, -5.1984e-02,
1.7919e-01, 6.3603e-02, -5.5516e-02,
1.0116e-01, 8.7370e-02, -8.6624e-02,
-8.4314e-02, 3.5997e-02, 2.1161e-01,
1.0902e-39, 9.3514e-40, 9.3074e-40,
9.8377e-40, 1.1299e-39, 8.2024e-40,
1.2062e-39, 1.0405e-39, 1.0284e-39,
-5.7829e-40, -6.7489e-40, -6.3814e-40,
-6.8460e-40, -7.9377e-40, -7.6449e-40,
-4.7632e-40, -5.6022e-40, -5.2053e-40,
1.8459e-39, 2.1036e-39, 2.1848e-39,
2.0535e-39, 2.3728e-39, 2.4416e-39,
1.7027e-39, 2.0249e-39, 2.0833e-39,
9.1594e-40, 8.0493e-40, 7.7836e-40,
7.5889e-40, 6.3026e-40, 9.3384e-40,
9.6987e-40, 1.1273e-39, 8.1906e-40,
-7.9046e-39, -7.2328e-39, -7.1040e-39,
-7.9046e-39, -7.1862e-39, -7.4931e-39,
-6.5243e-39, -7.1117e-39, -6.9941e-39,
1.3577e-39, 3.5945e-40, -3.6833e-40,
1.3768e-39, 6.9779e-40, -7.5180e-40,
5.7295e-40, -6.0767e-41, -1.3085e-39,
7.7960e-39, 7.8579e-39, 7.4482e-39,
7.4224e-39, 7.5791e-39, 7.4378e-39,
6.5819e-39, 6.7271e-39, 6.6281e-39,
-1.6535e-39, -7.7817e-40, -8.5918e-40,
-2.0861e-39, -1.3658e-39, -1.0560e-39,
-3.4360e-39, -2.6878e-39, -2.6477e-39,
4.6460e-02, 1.1676e-01, -5.9846e-02,
8.6467e-03, -1.1287e-02, 7.0129e-02,
-1.1277e-01, 1.0321e-02, -1.9567e-02,
1.2145e-01, -7.1995e-02, -1.3615e-02,
9.7877e-02, 6.6061e-02, 1.0272e-02,
1.1391e-01, 5.6974e-02, 9.7472e-02,
-3.3605e-02, 6.1751e-02, -4.3004e-02,
-5.1040e-02, -3.8798e-02, -7.1736e-02,
-1.0179e-02, 8.5964e-02, -8.1435e-04,
2.5149e-02, 7.1990e-02, 8.1534e-02,
6.3133e-02, 5.8643e-02, 4.6756e-02,
-5.3580e-03, 3.4411e-02, 5.2957e-03,
1.0652e-01, -6.6035e-02, 8.5754e-02,
3.2919e-01, -1.5958e-02, 2.1694e-03,
-9.0943e-02, -2.1920e-02, 2.9706e-02,
4.7986e-02, 1.7105e-02, -5.7711e-02,
-4.2066e-03, 6.5668e-02, -1.6617e-01,
1.0057e-02, -2.0108e-03, -1.5499e-01,
6.7941e-02, 1.7352e-01, 4.9498e-02,
6.2013e-02, 9.6180e-02, -2.9861e-03,
-1.2482e-02, 9.5709e-03, -8.7913e-02,
-8.6954e-02, 9.9646e-03, 8.0050e-02,
-4.4157e-02, -6.3008e-03, 4.0645e-02,
-7.9624e-02, 1.0856e-01, -4.5341e-04,
7.1085e-02, 5.7002e-02, 1.1673e-02,
-5.1378e-02, -2.3945e-03, -5.9532e-02,
3.4998e-02, -3.6019e-02, 1.0428e-02,
5.9774e-03, 5.4993e-03, 2.4306e-02,
-5.9813e-03, 4.4999e-02, 7.4744e-02,
-3.0773e-02, -3.6835e-02, 5.8396e-04,
-3.8644e-01, 2.4563e-01, 1.2436e-01,
-3.2986e-01, -1.1044e-01, 2.0753e-01,
-1.3621e-01, -1.3544e-01, 5.8882e-02,
8.8837e-02, 5.7460e-02, -3.0960e-02,
-1.2598e-03, 3.9124e-02, -5.3322e-02,
-4.4227e-02, -3.8000e-02, -3.2677e-02,
1.5675e-01, 1.0808e-01, 1.1024e-01,
5.4468e-01, -5.9268e-01, 1.0088e-01,
8.2360e-02, 1.9646e-01, 6.4799e-03,
1.6357e-01, 6.8273e-02, -1.2051e-01,
4.9511e-02, 4.7334e-01, -4.8876e-02,
-1.3130e-01, -5.1568e-03, 1.0088e-01,
-5.8971e-02, 2.5775e-01, 9.0169e-02,
-3.0461e-01, -3.2353e-02, -2.0293e-01,
1.3897e-02, 1.4249e-01, -5.8661e-02,
-1.3624e-01, -5.3026e-02, 3.1038e-03,
-5.6211e-01, -2.8375e-01, -1.2524e-01,
-2.3813e-01, -2.2439e-02, -4.4082e-02,
9.9066e-02, -7.1735e-02, 2.2345e-02,
-1.4791e-02, 1.3225e-01, 8.9460e-02,
-4.8986e-02, -3.2296e-02, -4.7474e-02,
6.5865e-02, -8.0697e-02, -6.8475e-02,
-7.6845e-02, 1.1568e-01, 3.7443e-03,
1.0448e-01, -3.3206e-03, 5.4523e-02,
5.5741e-02, 5.0917e-02, 1.0209e-01,
-9.6729e-02, 7.8876e-02, -4.9550e-02,
-3.8926e-02, 7.1163e-02, 8.9436e-02,
-1.4001e-03, -9.4980e-02, -7.7747e-02,
9.4335e-02, 1.1605e-01, 9.5715e-02,
1.7951e-02, 4.3177e-03, -5.6937e-02,
4.4558e-02, -5.2562e-02, 4.0652e-02,
1.8058e-01, -1.0763e-01, 4.8927e-02,
-5.2569e-03, -1.3437e-01, 2.8578e-02,
1.3592e-02, -3.9346e-02, 1.0003e-01,
1.8091e-01, 7.2687e-03, -3.7241e-02,
6.0438e-02, 5.7872e-02, 7.3778e-02,
1.2411e-02, 4.1856e-02, -2.8892e-02,
3.2884e-02, 6.9072e-02, -5.9363e-02,
-1.7112e-01, -9.9734e-02, -7.3417e-02,
-8.9623e-02, 4.5292e-02, -1.6635e-01,
-3.1895e-02, 1.4284e-01, 2.0752e-01,
2.3383e-02, -1.3490e-02, 5.1593e-03
}
,
{
5.8708e-01, 2.6026e-01, 8.8379e-02,
3.1818e-01, 7.0055e-03, 1.1652e-01,
1.1719e-01, 8.7711e-02, -1.1687e-02,
7.5741e-02, -3.7970e-01, 1.6001e-01,
1.0739e-01, 3.1735e-01, 2.0061e-01,
8.6719e-02, 8.5111e-02, -3.9354e-02,
-9.9512e-02, -9.1524e-02, -9.7984e-02,
5.6333e-02, -1.5928e-01, 1.1998e-03,
2.7488e-02, 2.8168e-02, 1.3768e-01,
5.9686e-02, 2.8931e-01, -1.7131e-02,
1.6391e-01, 3.3748e-01, 1.2296e-01,
8.9242e-02, 1.4761e-01, 1.7187e-01,
-2.6352e-39, -4.0703e-39, -5.1751e-39,
-2.5214e-39, -3.9666e-39, -4.6282e-39,
-2.4635e-39, -3.6734e-39, -4.3359e-39,
-7.1654e-02, 7.9691e-03, -1.0219e-01,
-5.5684e-02, -1.3065e-01, -1.9106e-02,
1.0561e-01, 5.9054e-02, -2.1279e-02,
-1.8840e-02, 1.6690e-01, 3.8050e-01,
6.2779e-02, -1.2124e-01, 5.0304e-01,
2.1870e-02, 1.7631e-01, 1.4858e-01,
1.4614e-01, -1.1767e-01, -3.9155e-02,
1.2963e-01, -4.6753e-02, 1.3848e-01,
-8.2292e-02, 2.1908e-01, 6.2794e-02,
-3.2625e-01, -8.8528e-03, -6.5603e-03,
5.4245e-02, 2.7983e-01, 2.1608e-01,
8.5890e-02, 1.0955e-01, -1.1606e-01,
9.7435e-02, 1.5911e-01, 6.7285e-02,
3.9570e-02, 1.9333e-01, -1.5531e-02,
-2.3475e-01, -2.5006e-02, 2.8106e-02,
6.8740e-03, 1.3261e-01, -3.8563e-02,
8.8758e-02, -4.2225e-02, 4.7042e-02,
5.6284e-02, -2.8303e-02, 3.4532e-03,
-4.0265e-02, -3.0645e-02, -5.2059e-02,
-4.6196e-02, -2.4868e-02, -3.3257e-02,
-3.7208e-02, -2.4100e-03, -7.1959e-04,
6.4237e-39, 6.1438e-39, 6.5434e-39,
6.1596e-39, 6.1608e-39, 6.3157e-39,
6.4263e-39, 6.4625e-39, 6.5877e-39,
1.1092e-01, -4.4784e-02, 9.1292e-02,
9.2900e-02, 1.2459e-01, -7.1447e-02,
2.6158e-02, -5.0219e-02, -5.6136e-02,
-5.8603e-02, 2.9323e-02, -2.4230e-01,
-9.4921e-02, 1.9103e-01, 1.1670e-01,
1.2022e-02, 6.2830e-02, 3.0393e-01,
3.3819e-02, 1.0040e-01, 8.2600e-02,
-8.7604e-02, 7.0641e-02, -1.0132e-01,
-9.9371e-02, 8.9363e-02, -1.0703e-01,
4.4603e-01, 7.9636e-03, 1.8834e-01,
1.1859e-01, 4.0760e-01, 9.6841e-02,
-1.1735e-01, 2.3993e-01, -7.7916e-02,
6.3481e-02, -1.4958e-01, 1.1554e-02,
5.2668e-02, 3.4379e-01, 8.3536e-03,
-5.5403e-02, 1.1655e-01, -7.5022e-02,
-8.2992e-02, -7.0322e-02, -1.0078e-01,
-1.4516e-02, -1.6558e-02, 6.6806e-02,
-6.7454e-04, -5.7525e-02, 1.5772e-01,
1.6446e-01, -1.1897e-02, -8.3387e-02,
7.1339e-02, 1.6254e-01, 1.6963e-01,
1.2630e-02, 5.7933e-02, 8.4686e-02,
-5.6318e-39, -6.1837e-39, -6.1661e-39,
-5.9923e-39, -6.2371e-39, -6.4922e-39,
-6.4206e-39, -6.6092e-39, -7.1603e-39,
4.6507e-02, -4.5924e-02, -7.3838e-02,
-3.3012e-02, 5.1295e-02, -7.4884e-02,
7.5389e-02, 1.2002e-01, 3.9442e-03,
9.9461e-02, 1.9607e-01, 1.4896e-01,
-1.1191e-02, 1.8352e-01, 2.6778e-01,
8.0977e-02, 1.0885e-01, 2.5331e-01,
3.1503e-02, -3.0004e-01, -6.9114e-02,
2.0705e-01, -2.0978e-02, 1.5154e-01,
6.3033e-02, -1.5721e-01, 5.1067e-02,
-1.1220e-02, 1.5315e-01, 4.5277e-03,
3.3250e-01, 1.4207e-01, 1.3469e-01,
5.2996e-01, -2.5803e-01, -4.5525e-02,
3.9807e-02, -1.7088e-01, -1.2414e-01,
2.1564e-01, -2.9160e-01, -1.8796e-01,
1.5482e-02, 2.7005e-01, 8.2446e-02,
5.4906e-02, -1.0507e-01, -8.0069e-02,
-4.5729e-03, -2.0621e-02, 5.0088e-02,
2.5479e-02, 9.5924e-02, 8.3813e-02,
4.7833e-02, -2.6191e-01, 3.3483e-02,
6.1653e-02, 7.1940e-03, -1.3578e-01,
1.7662e-01, -2.8194e-02, -2.7509e-02,
-1.9419e-39, -2.4904e-39, -2.7567e-39,
-2.9896e-39, -3.2700e-39, -3.6336e-39,
-3.8942e-39, -4.2028e-39, -4.5229e-39,
-1.6839e-02, -9.4421e-02, -3.0147e-02,
-6.5974e-02, -1.6716e-02, 5.0672e-02,
-7.9841e-02, -4.7086e-03, 5.0016e-02,
1.8223e-04, 3.3984e-03, 5.1965e-02,
-7.3512e-02, -5.6604e-03, -1.1630e-01,
-1.0767e-01, 3.2261e-02, -2.0044e-01,
1.0995e-01, 4.3581e-02, -3.9397e-02,
-1.4476e-02, -2.3087e-02, 2.6423e-03,
1.2047e-02, 1.2084e-01, 1.8563e-01,
-2.8497e-01, -2.5353e-01, 1.0933e-01,
8.8974e-03, 1.3315e-01, 1.9153e-01,
2.0427e-02, -8.9900e-02, 2.2363e-02,
2.8575e-02, 1.6351e-01, 1.1876e-01,
-2.7438e-02, -1.0816e-03, -5.5680e-02,
5.1369e-02, -2.0575e-02, 4.5232e-02,
9.4988e-02, 2.5418e-02, 8.9888e-02,
9.6631e-02, 1.5828e-01, 1.1577e-01,
-2.9665e-02, 3.2035e-02, 1.4428e-01,
7.4352e-03, 2.4917e-03, 4.2713e-03,
1.2534e-02, 2.1314e-02, 1.5963e-02,
2.2920e-03, 2.1864e-02, 2.2921e-02,
7.1089e-40, 5.3581e-40, 4.5922e-40,
6.2492e-40, 4.6365e-40, 4.5466e-40,
9.2740e-40, 7.7219e-40, 7.4187e-40,
-7.0909e-02, 1.1127e-01, -8.8953e-02,
-5.0537e-04, 4.5664e-05, 1.3829e-02,
7.4380e-02, 1.3900e-03, 4.0345e-02,
5.7173e-02, 8.7514e-02, -3.9945e-01,
4.4116e-02, 1.4148e-01, -2.7578e-02,
-1.2133e-02, 1.9647e-01, -2.6767e-02,
8.5870e-02, -1.3723e-02, 1.3408e-02,
7.9471e-03, 7.8321e-02, 5.1118e-02,
-8.3660e-02, -7.1584e-02, 2.7423e-02,
-5.5651e-39, -3.2350e-39, 4.7534e-39,
-4.8581e-39, -5.8010e-39, 6.3268e-39,
-3.4016e-39, 6.2313e-39, 5.7413e-39,
-3.0708e-39, 6.0155e-39, -6.3317e-39,
-3.1054e-39, -5.5914e-39, -6.4181e-39,
-1.3636e-40, -6.0343e-39, -6.2034e-39,
1.0108e-39, -2.5283e-39, -8.6098e-40,
1.0088e-39, -2.3042e-39, -8.2029e-40,
1.2802e-39, -3.7761e-39, -4.6451e-40,
1.4160e-39, 7.3869e-40, 1.3275e-39,
1.2560e-39, 1.0078e-39, 1.2296e-39,
-2.4490e-39, 8.6071e-40, -2.4510e-39,
2.1753e-39, -2.0576e-39, -2.1365e-39,
2.0157e-39, 2.0755e-39, 1.9439e-39,
2.0998e-39, 2.0732e-39, 2.1072e-39,
-1.1289e-39, -1.6132e-39, 4.8117e-40,
1.2029e-39, -1.3112e-39, 6.4761e-40,
1.4958e-39, -9.2719e-40, 8.9526e-40,
3.6032e-39, -4.9803e-39, -2.4410e-39,
-1.6429e-39, -4.9602e-39, -5.9626e-39,
-1.6627e-39, -4.9809e-39, -5.6258e-39,
1.6619e-39, 1.7856e-39, 5.1822e-39,
1.5443e-39, 1.4215e-39, 6.1830e-39,
1.4242e-39, -1.7895e-39, 5.2206e-39,
-2.4764e-01, -2.8696e-01, -5.7562e-03,
1.9255e-01, 5.1335e-02, -1.4512e-01,
-1.1017e-02, -3.6505e-02, -1.1773e-01,
5.8651e-02, -1.9354e-02, 2.1595e-02,
-3.5114e-03, 1.8335e-01, 4.0043e-02,
1.0579e-01, -6.3055e-02, 2.6981e-02,
-1.4351e-02, -1.5029e-02, -9.7792e-02,
4.6718e-02, 3.8673e-02, -2.3410e-02,
-2.8942e-03, -8.4898e-03, -3.3613e-02,
2.0298e-01, 9.7218e-02, 1.5052e-01,
3.2108e-01, 2.6568e-01, 1.3809e-03,
1.0008e-01, 6.9262e-02, -4.7810e-02,
4.1291e-39, 4.3762e-39, 4.2724e-39,
4.5864e-39, 4.7827e-39, 4.8821e-39,
4.5529e-39, 4.6921e-39, 4.7519e-39,
9.1246e-03, -1.8136e-02, -5.8517e-03,
9.1080e-03, 4.2591e-02, -1.5604e-02,
-3.6270e-02, 5.9184e-02, 2.3189e-02,
4.2636e-02, 3.6600e-01, 4.7134e-01,
3.6666e-02, 4.3565e-01, 2.1105e-01,
-5.2747e-02, 4.0503e-01, 2.0926e-01,
8.8427e-02, 4.9138e-02, -2.3381e-01,
-5.6521e-02, 7.5013e-02, -1.4783e-01,
-4.7299e-02, -8.1200e-02, -6.5665e-02,
-1.6281e-01, -2.3070e-01, 5.4033e-02,
1.1527e-01, 3.4730e-01, 1.9293e-02,
-1.8352e-02, 2.0626e-01, -1.1955e-01,
8.1665e-02, 3.8584e-02, 2.7958e-03,
6.4294e-02, 1.3912e-01, -5.6370e-02,
-1.7618e-02, 9.0357e-02, -5.5021e-03,
9.3211e-05, 1.5219e-01, 1.0844e-01,
7.6218e-02, 1.7016e-01, 9.2438e-02,
4.3387e-02, 8.0141e-02, -3.2034e-02,
9.2121e-03, -2.8742e-03, -1.5988e-03,
9.1980e-03, 1.6983e-02, 3.3154e-03,
-2.5642e-02, 4.1607e-03, 6.9246e-03,
3.7665e-40, -4.0391e-41, -4.0502e-41,
2.2436e-40, -1.7190e-40, 1.6583e-40,
1.4090e-40, 2.2914e-41, 6.7388e-41,
-8.1776e-02, 9.0814e-02, 1.0222e-01,
-3.4949e-02, 1.0266e-01, 3.6826e-02,
-8.3856e-02, 1.1102e-01, 1.1026e-01,
1.5993e-02, -1.1626e-01, -3.0870e-01,
-3.4119e-03, 1.7638e-01, -1.9092e-01,
-1.2549e-01, 3.2538e-01, -7.9381e-02,
3.8433e-03, -8.2530e-02, 3.2103e-02,
-1.1637e-02, -1.0371e-01, 2.3851e-02,
2.5390e-02, 7.7085e-02, 8.9536e-02
}
,
{
-2.8918e-02, -8.3719e-02, -3.3026e-02,
-2.2620e-01, 2.4280e-02, -2.1254e-01,
2.8231e-02, 3.5323e-02, -2.8425e-02,
1.6891e-01, 3.8192e-03, 7.2794e-02,
-1.6364e-01, -4.1031e-02, -1.3141e-02,
-3.9478e-02, 1.4910e-01, -7.0978e-02,
-6.3880e-02, 9.8206e-02, 1.3163e-01,
1.5778e-01, 1.1914e-01, 3.3277e-01,
-3.6808e-01, -5.5627e-01, 1.4401e-01,
-4.0314e-01, 3.6298e-01, -3.8212e-02,
-2.3782e-01, 2.5410e-01, -2.2334e-01,
7.6542e-02, 9.4998e-02, 3.3399e-02,
-1.8601e-01, -1.8863e-02, -4.1835e-02,
-5.8671e-02, -8.9987e-02, -6.1069e-02,
-7.1062e-02, -9.5987e-02, 1.2318e-02,
5.4541e-39, -1.8871e-39, 4.5048e-39,
-2.2237e-39, -5.4753e-39, 1.4395e-39,
-3.5753e-39, 6.1466e-40, -2.1567e-39,
4.5273e-02, 1.1619e-02, 1.1379e-01,
1.4093e-01, 1.0444e-01, 1.1283e-01,
-3.0230e-02, 3.1937e-01, 5.0541e-02,
8.2862e-02, -3.1540e-02, -6.4833e-02,
1.5168e-01, 1.7613e-03, 4.2690e-02,
1.8820e-01, 4.3783e-02, 6.3473e-02,
8.0477e-02, 1.0397e-01, -3.6337e-02,
-7.2828e-02, 6.4048e-02, 4.2476e-02,
-1.3974e-04, -2.2468e-01, -4.9189e-02,
-2.7478e-03, 8.7663e-03, 4.3870e-02,
-3.3168e-02, 1.1915e-01, -1.8083e-02,
4.8155e-02, -4.1742e-02, 1.1251e-01,
-6.1535e-02, 5.1782e-02, -2.3494e-02,
5.1677e-02, 1.4067e-01, -1.0377e-01,
3.2951e-03, 1.1942e-02, -1.1775e-01,
-2.2104e-02, -8.1073e-02, -3.7509e-02,
6.8970e-03, 1.6406e-02, 4.6923e-02,
-8.8448e-03, 2.9130e-02, 3.1024e-02,
7.6795e-02, 4.6816e-02, -1.3204e-02,
1.3988e-01, 1.1175e-01, 8.7121e-02,
1.2097e-01, -3.8463e-02, 6.7387e-02,
1.4708e-39, 1.7125e-39, 2.7764e-39,
1.5203e-39, 1.5811e-39, 4.4921e-39,
1.8828e-39, 1.7593e-39, 2.3774e-39,
4.3474e-02, -4.7065e-02, -7.1999e-02,
6.0338e-02, 3.7240e-02, 2.8802e-02,
-4.0701e-02, 1.8627e-02, -1.8181e-02,
5.5169e-02, 1.1874e-01, -7.0475e-02,
-1.3438e-02, 1.4335e-01, 1.5180e-01,
5.6331e-02, 7.9719e-02, 6.2691e-03,
-6.6460e-02, 2.7455e-01, 5.5916e-02,
1.3515e-01, -3.7263e-01, 1.3463e-01,
-4.0820e-05, 3.1896e-01, -8.3871e-02,
-7.6172e-02, 6.1963e-02, -1.3804e-02,
-5.2852e-02, 1.0006e-01, -3.4106e-02,
6.7218e-02, -3.8616e-03, -7.1788e-02,
1.6386e-02, -1.8612e-02, -1.7354e-01,
-1.2166e-01, 1.2667e-02, -3.3852e-02,
-3.2897e-02, 1.0343e-01, 2.4924e-01,
-1.3272e-02, 1.5705e-01, 6.7731e-02,
1.0637e-01, 1.9482e-02, -2.0655e-01,
-5.9087e-03, -7.1073e-02, 1.8723e-02,
-2.6087e-02, 1.5997e-01, 9.6264e-02,
1.2431e-01, 1.1462e-01, -9.7197e-02,
-6.2347e-02, -4.5239e-02, -2.6443e-02,
3.7406e-39, -4.6345e-40, 3.7971e-39,
-3.8112e-39, -3.5585e-39, 4.6938e-39,
6.0588e-39, -4.2403e-39, 1.5311e-39,
1.6381e-01, -6.8390e-02, 2.6527e-02,
-9.8612e-02, 2.1953e-01, -2.1886e-01,
7.4841e-02, -1.2118e-01, -8.1700e-02,
4.4974e-02, 7.7514e-02, -8.4620e-02,
-2.9808e-02, 2.1591e-02, -3.9502e-02,
-5.5797e-02, -6.5105e-02, -5.9860e-02,
-3.7811e-01, -2.3056e-01, -7.4491e-02,
4.0833e-02, -2.2613e-01, -1.4986e-01,
-1.0974e-01, -6.5161e-01, 1.7546e-01,
7.7903e-02, -1.5969e-02, -6.3040e-02,
-1.7819e-01, -7.1414e-02, 1.8451e-02,
-1.0618e-01, 3.5614e-03, 3.6719e-02,
1.5666e-01, 3.9222e-01, 9.1678e-02,
1.4519e-01, 5.7331e-01, -7.3466e-02,
1.0271e-01, 1.0803e-01, -1.3150e-01,
3.7496e-01, 1.5001e-01, 1.4727e-01,
3.2151e-01, 1.2875e-01, -8.1645e-02,
2.8629e-01, 1.9329e-01, -8.0009e-02,
-9.9557e-02, -2.6954e-02, 2.6042e-02,
-5.3374e-02, 1.1369e-01, 4.6503e-02,
-3.4068e-02, 9.1849e-03, -9.1420e-02,
4.6343e-39, 4.8289e-40, 3.1694e-40,
-3.5093e-39, -4.7356e-39, 7.1265e-40,
-4.9626e-39, -2.1280e-39, 1.8542e-39,
-1.3634e-01, -5.4825e-02, -6.6125e-02,
-2.0694e-01, 1.4924e-01, 1.4028e-01,
3.2735e-02, 7.6360e-02, -9.2541e-02,
-1.2149e-01, -7.9789e-02, -2.9591e-02,
1.2852e-02, 1.2457e-01, 1.3081e-02,
-3.2966e-03, 1.1089e-01, 8.6461e-02,
1.4352e-01, 5.9238e-02, -2.1140e-02,
7.3999e-02, 2.0893e-01, 3.5512e-02,
-5.3110e-02, 3.9222e-01, 1.3103e-01,
1.0168e-01, 1.6685e-02, 5.1616e-02,
9.8241e-02, -1.6502e-01, -1.2586e-01,
8.3915e-02, 7.4837e-03, 5.7355e-02,
-3.4982e-02, -1.2773e-01, 6.8213e-02,
-1.4674e-01, -3.6844e-01, 8.1546e-02,
-1.5385e-01, -7.0368e-02, 4.3894e-02,
7.8201e-02, -1.3952e-01, 1.5154e-01,
2.3880e-02, 1.4078e-01, -1.2906e-01,
-1.8268e-01, -1.5687e-02, -1.2588e-01,
-9.4643e-03, 1.4718e-02, 7.4932e-02,
3.0996e-02, -1.2339e-01, 1.7452e-01,
4.4221e-02, -1.3808e-01, -1.0205e-02,
-8.6959e-40, -3.7907e-39, -1.6020e-41,
4.3567e-40, 1.4647e-39, 6.5692e-40,
5.4286e-39, 8.8667e-40, -3.5047e-39,
2.4116e-02, -9.5358e-02, 1.6468e-01,
3.1916e-01, -2.3472e-01, -2.1644e-01,
1.2945e-01, -1.8403e-02, -3.2247e-02,
1.3666e-02, -3.0548e-02, -4.7635e-02,
-9.2714e-02, -2.1605e-01, -5.9464e-02,
-8.9110e-03, -3.9299e-03, -2.3289e-02,
-1.7855e-01, 9.0661e-03, -1.9142e-02,
-5.6754e-02, -5.4451e-01, -5.7664e-01,
1.6835e-01, 2.0531e-02, 2.0812e-01,
5.2794e-02, -9.0414e-02, 3.5560e-02,
3.7395e-02, 5.9355e-02, -3.6676e-02,
3.8035e-02, 6.7844e-02, 1.1042e-01,
5.0372e-02, 6.8188e-02, -8.5353e-02,
2.2769e-01, 5.9758e-01, -7.4568e-02,
7.8316e-02, 8.4925e-02, -4.0400e-02,
-7.7984e-02, -2.0739e-01, 1.1736e-01,
2.4528e-02, 2.1850e-01, 2.5639e-01,
-2.4561e-02, 8.4661e-02, -9.2191e-02,
-2.7006e-02, -7.8921e-02, -2.7124e-02,
-5.9232e-03, -2.7693e-02, 5.9524e-02,
9.7704e-02, 9.6223e-02, 2.0432e-02,
-2.5588e-39, 5.5478e-39, -5.6209e-39,
-4.7285e-39, 4.5875e-39, -5.7483e-39,
6.7240e-40, -3.5113e-39, -3.6246e-39,
1.6870e-03, -2.1707e-01, -3.8895e-02,
-5.8465e-02, -5.9146e-02, 1.1936e-01,
-2.7727e-02, -9.5047e-02, -2.2627e-01,
-9.5155e-02, -7.1422e-02, 9.4611e-03,
3.7587e-03, 1.6966e-02, 2.8839e-02,
-3.0794e-02, 1.9888e-02, -5.2541e-02,
-1.0708e-02, 3.0171e-02, -3.0473e-01,
-1.0214e-01, 4.2017e-02, 2.5568e-01,
-9.8664e-02, -5.5928e-01, -7.6876e-02,
-8.6821e-03, 4.6484e-02, -3.0836e-01,
-1.0205e-01, 6.8113e-02, -2.8059e-01,
-5.7828e-02, 2.0990e-02, -1.2843e-01,
7.5680e-02, 1.7504e-02, 1.6278e-01,
1.4075e-01, 2.4361e-01, 2.2737e-01,
-1.3044e-01, 8.2145e-03, 1.6344e-01,
-2.4780e-03, 1.5108e-01, 1.3313e-02,
-9.5257e-02, 6.1810e-02, -1.9386e-01,
7.1365e-02, 1.5328e-01, 9.5848e-04,
1.2278e-01, 7.8318e-02, 3.3400e-02,
4.8597e-02, 6.0632e-02, -5.7238e-02,
3.2522e-02, 4.5926e-02, -9.5566e-02,
1.0844e-39, -3.2490e-39, -2.6904e-39,
-3.0517e-39, 4.7535e-39, 4.3440e-39,
-1.3996e-39, 4.5201e-39, -3.6165e-39,
-5.6164e-02, 1.0353e-01, 6.6228e-02,
8.2147e-02, 4.7827e-01, 1.2004e-01,
-6.8150e-02, 1.8340e-01, 2.2113e-01,
1.0580e-05, -2.0949e-01, -1.0358e-01,
1.6206e-01, 1.2538e-01, -1.3104e-01,
1.3700e-01, 2.9282e-02, -8.7020e-02,
4.5467e-39, 5.9787e-39, 2.6105e-39,
-1.2670e-39, 2.9513e-39, -1.0811e-39,
-3.9129e-39, -1.8499e-39, 2.9297e-39,
5.7414e-39, 5.5907e-39, 5.5702e-39,
5.9004e-39, 5.7585e-39, 6.3188e-39,
5.7395e-39, 5.6146e-39, 5.6451e-39,
-7.3964e-39, -6.3330e-39, -5.5236e-39,
-7.5172e-39, -5.8828e-39, -3.7555e-39,
-6.9528e-39, -7.7656e-39, -5.5115e-39,
-7.9031e-39, -7.8200e-39, -7.7914e-39,
-7.4570e-39, -7.6413e-39, -7.9054e-39,
-7.3437e-39, -6.7956e-39, -7.0789e-39,
-3.6774e-40, 1.3572e-40, 3.0250e-40,
-4.1792e-40, -4.6240e-40, 2.2528e-40,
-5.2143e-40, -5.6847e-40, -4.2768e-40,
-4.0128e-39, 1.3485e-39, 1.3436e-39,
1.5337e-39, -3.9186e-39, 1.2120e-39,
1.2992e-39, 1.5671e-39, 1.5659e-39,
-4.6533e-39, -4.7029e-39, -6.0334e-39,
-5.1157e-39, -5.3257e-39, -5.8595e-39,
-4.3046e-39, -4.4391e-39, -5.0039e-39,
-1.0025e-39, -1.0145e-39, -8.6762e-40,
-1.0282e-39, -1.0939e-39, -9.4134e-40,
-1.1868e-39, -1.2133e-39, -5.4261e-40
}
,
{
-1.2633e-01, 2.7332e-01, -4.6674e-01,
-9.4537e-03, 9.6797e-02, -6.4975e-01,
1.8103e-02, 2.7190e-03, 2.3888e-01,
4.8553e-02, -8.7297e-02, 1.8415e-01,
3.1194e-02, -7.2899e-02, -8.1835e-02,
7.1639e-02, -3.1455e-02, -6.2866e-02,
-2.1413e-02, 4.6066e-02, 9.2372e-02,
1.5761e-01, -1.0352e-01, -3.4808e-01,
2.3715e-02, 1.6453e-01, -1.3699e-01,
1.1705e-01, -1.6882e-02, 1.2575e-01,
-2.9834e-02, -1.1558e-01, 4.7318e-01,
3.5301e-02, 1.1246e-01, 3.5038e-03,
1.5837e-01, -2.9968e-01, 1.6094e-01,
4.0562e-02, -1.6329e-01, -3.7023e-02,
-3.9991e-02, 1.7001e-01, -2.7735e-03,
8.8139e-02, -2.4828e-01, 5.5751e-04,
-1.3871e-01, -2.4839e-01, 1.7996e-03,
-1.1670e-01, 3.3651e-02, -2.9559e-02,
3.8572e-03, 3.7329e-02, 4.7511e-02,
-7.8848e-02, 1.2844e-01, 9.2677e-02,
-8.5041e-02, 5.7212e-02, -1.0415e-02,
-3.2462e-39, 2.3003e-39, 4.9676e-39,
-3.9261e-39, -6.8290e-40, 5.9119e-39,
-4.1242e-39, -1.1996e-39, 3.8436e-39,
-2.3243e-02, -2.2525e-02, 3.9668e-02,
-1.1210e-01, -2.3892e-01, 1.6431e-01,
-1.3998e-01, -1.5857e-01, -1.5625e-01,
-1.7634e-02, -3.9174e-02, -9.0936e-03,
-3.9428e-03, -1.6411e-02, 2.6484e-03,
1.1376e-02, -2.9057e-03, 6.3382e-02,
4.8930e-02, 9.1298e-02, 1.8195e-02,
-6.3365e-02, -1.5407e-01, 8.1543e-02,
4.9919e-02, 1.6852e-01, 4.4053e-02,
-4.8682e-02, -7.3614e-02, -6.9206e-03,
-4.8193e-02, -2.3704e-01, -8.3394e-03,
5.6024e-02, 3.7845e-01, -2.4550e-02,
5.2050e-02, 2.2027e-01, -4.1328e-02,
-6.6327e-02, 1.0450e-01, 1.7058e-02,
-1.2047e-01, 5.2494e-02, -1.8018e-02,
5.4807e-02, 1.1177e-01, 2.3511e-02,
6.0413e-03, -3.2457e-02, 7.6611e-02,
-2.1276e-02, 3.0054e-02, 5.0752e-02,
7.5556e-02, 2.5734e-02, -6.0634e-02,
1.2201e-01, -4.1533e-01, 2.7634e-02,
4.5560e-01, 3.2832e-01, 2.6277e-02,
1.9889e-39, 3.8337e-39, 4.0170e-39,
1.5149e-39, 3.6456e-39, 4.0474e-39,
1.1508e-39, 2.7381e-39, 3.8673e-39,
-7.9206e-02, -2.0763e-02, -2.4842e-01,
-6.5777e-02, -1.8446e-01, 2.6178e-01,
-1.7908e-02, -2.3039e-01, -3.5767e-01,
1.0324e-02, 1.3610e-01, 8.6519e-02,
1.3499e-01, 3.1933e-02, 9.1822e-03,
-3.6017e-02, -2.2056e-01, -2.3258e-01,
-7.6185e-02, -2.8981e-01, -1.1816e-01,
-9.9048e-02, 5.3879e-02, -1.7351e-01,
-2.1874e-01, -1.2109e-01, -3.1457e-01,
5.1576e-02, -2.5656e-02, 4.6789e-02,
7.6286e-02, 6.0126e-01, -2.5925e-01,
-5.3443e-02, -3.3656e-01, 4.7585e-01,
-4.7442e-02, -5.1580e-02, -8.5216e-02,
-1.0600e-01, -1.3859e-01, -3.1484e-01,
2.1454e-01, -1.1851e-01, -7.6614e-02,
-7.8873e-03, -7.0275e-02, -1.0958e-01,
-8.0654e-02, 1.3946e-01, 2.5292e-01,
1.3254e-03, -6.7372e-02, -2.6429e-01,
-8.2344e-02, 1.2388e-01, 5.2930e-02,
8.3665e-02, 3.9729e-01, 4.7687e-02,
-4.4502e-02, -8.3105e-02, -1.6430e-01,
1.2825e-39, 1.7532e-39, 2.1774e-39,
-2.1331e-39, -2.1826e-39, -1.0009e-39,
3.7081e-39, 2.0015e-39, -5.8349e-40,
-3.5278e-02, 6.5211e-02, -5.4199e-03,
8.3961e-02, 3.1410e-02, 4.4510e-02,
-5.4905e-02, 4.0727e-02, -1.5710e-02,
1.0813e-01, 8.2043e-03, 4.1303e-02,
1.3405e-01, 1.4150e-01, 7.2155e-02,
3.3942e-02, -4.7781e-02, 1.6095e-01,
-1.4266e-01, -2.5283e-02, 6.4043e-03,
-1.8699e-02, 1.0895e-01, -2.1497e-02,
5.5074e-02, 1.7031e-02, 1.0572e-01,
7.3199e-04, 1.0813e-01, -9.0280e-05,
1.4808e-01, 2.5436e-01, -1.3749e-01,
2.2936e-02, -7.9733e-02, -2.2360e-01,
6.0406e-02, -1.2874e-01, -7.4692e-02,
-1.3216e-01, -9.9889e-03, 2.7608e-03,
-1.1412e-01, -5.1312e-02, -1.7196e-02,
-2.2800e-02, -1.2112e-01, -9.3855e-03,
3.6905e-02, 1.0049e-01, 9.0602e-03,
-7.3200e-02, 1.0628e-01, -4.8218e-02,
-4.6525e-02, 6.0314e-02, -3.6467e-03,
-8.0943e-02, 2.5461e-01, 1.5461e-01,
-5.7708e-02, -5.7823e-02, 5.4042e-02,
3.8847e-39, 3.5806e-39, 4.1610e-39,
3.9082e-39, 4.1898e-39, 4.1926e-39,
4.1200e-39, 4.3759e-39, 4.3977e-39,
-3.3576e-01, 9.5443e-02, 2.7804e-02,
-2.3834e-01, -7.2650e-01, -1.2229e-01,
1.0380e-01, 1.9520e-01, 3.4571e-02,
-3.7291e-02, 7.6216e-02, 8.6171e-02,
-1.6324e-01, -8.6759e-03, 4.3038e-02,
-3.4364e-02, -7.2777e-03, 3.7451e-02,
1.8826e-01, 1.6387e-01, -3.4750e-02,
-2.0203e-01, 2.4170e-01, 9.0358e-05,
-1.3049e-01, 9.6855e-02, -1.6737e-03,
-6.3782e-02, 7.1413e-02, -6.5077e-02,
-1.5262e-01, 4.3261e-01, -8.4224e-02,
6.4632e-02, 1.0553e-01, -1.5274e-01,
4.4294e-05, 8.6239e-02, 5.7537e-03,
-5.7633e-01, -5.0076e-03, -5.2298e-02,
1.8556e-01, -1.1332e-02, -2.7010e-02,
1.6155e-01, -3.0337e-02, -9.6808e-03,
-2.8404e-01, -2.7625e-02, 1.6058e-02,
5.7937e-02, -6.6464e-02, 1.1096e-02,
7.8268e-02, 8.6122e-02, 2.9298e-02,
6.4696e-02, 2.0285e-01, 4.3660e-02,
1.5339e-01, -3.7650e-02, 7.1438e-03,
-8.9058e-40, -3.6429e-39, -4.7562e-39,
8.3914e-40, -2.8054e-39, -3.6702e-39,
4.3666e-39, -1.0602e-39, -3.0369e-39,
7.2731e-02, -1.0227e-01, -1.9583e-02,
-1.7466e-02, -2.0097e-01, 9.3108e-02,
6.5196e-02, -1.1880e-01, -3.5152e-03,
-5.6533e-02, 6.2109e-02, 5.2029e-02,
5.7971e-02, 5.1577e-02, 6.6318e-02,
-2.1669e-03, 7.7274e-02, -4.0609e-02,
2.8531e-02, -8.3960e-02, 1.3615e-02,
-1.1151e-02, -1.4162e-03, 5.6661e-02,
-8.0954e-02, -1.0600e-01, 4.3276e-02,
7.6762e-04, 3.1437e-02, -6.1084e-02,
-8.1119e-02, 2.1406e-01, 6.0836e-02,
4.8105e-02, -1.6263e-01, 9.2555e-03,
1.1060e-01, -2.1090e-01, 1.6435e-01,
-1.0248e-01, -1.1884e-01, -7.9929e-02,
5.9980e-02, 1.0271e-01, -1.1891e-02,
-7.5044e-02, -2.3655e-02, -5.2865e-02,
2.1542e-02, 2.7305e-04, 1.3508e-01,
-1.2317e-02, 9.0742e-02, -3.0079e-03,
-9.9020e-02, 1.5578e-01, -2.1482e-03,
-8.9029e-02, 1.8470e-01, 3.7571e-02,
-2.0394e-01, -1.3735e-01, 2.9648e-02,
-4.3016e-40, -7.3591e-40, -7.3773e-40,
-4.1239e-40, -8.6029e-41, -6.9504e-42,
-7.5082e-40, 1.2975e-40, 2.1462e-40,
-1.8967e-02, -1.4903e-01, 8.1452e-02,
1.2099e-01, -2.5524e-02, 1.3285e-02,
-1.3780e-01, -5.3359e-02, -3.1310e-02,
-1.8984e-02, 4.1962e-02, 1.0186e-01,
-1.0823e-01, 1.1079e-01, 7.8613e-02,
-1.4521e-01, -7.7509e-02, 1.8768e-02,
5.0613e-03, -3.0459e-02, -6.3055e-02,
4.4540e-02, 2.0135e-01, 9.6351e-02,
-1.9495e-02, -1.2314e-01, 1.1720e-02,
2.1739e-02, 5.2098e-02, -4.0453e-02,
-9.9983e-02, 4.7578e-02, -2.7862e-02,
-8.6565e-02, 1.5241e-01, -4.0462e-02,
4.0458e-02, -1.2871e-01, -4.3491e-02,
9.8981e-02, -1.3637e-01, 2.0092e-02,
1.5626e-01, -8.4550e-04, -2.5701e-02,
1.8511e-02, -1.0257e-01, -7.3238e-02,
-3.9802e-02, -1.6120e-02, -7.4068e-04,
-1.1377e-02, 9.7975e-03, -9.0342e-02,
-6.7152e-02, 1.0208e-01, 2.5234e-02,
-4.3687e-02, 2.5334e-01, 9.2712e-02,
3.7702e-01, 4.1450e-02, 1.9934e-02,
-5.4201e-39, -6.7158e-39, -7.5025e-39,
-5.2548e-39, -6.4829e-39, -7.2782e-39,
-4.9999e-39, -5.9599e-39, -6.0469e-39,
3.5890e-02, -7.3738e-02, 9.8899e-02,
3.3312e-02, 5.8231e-02, -2.1348e-01,
8.6289e-02, 5.0837e-02, -6.5613e-02,
7.0208e-02, 4.1424e-02, -6.0761e-02,
4.4654e-02, -3.3590e-02, -5.3044e-02,
1.2319e-01, -4.4666e-02, -8.8193e-02,
-9.0463e-02, -3.0083e-02, 6.8075e-02,
4.2531e-02, 4.3248e-01, 1.3480e-01,
9.2389e-02, 1.3683e-01, -2.6092e-01,
2.8925e-02, 2.3317e-01, 7.8128e-02,
6.3444e-02, 1.6291e-01, -3.8727e-03,
6.9107e-02, 6.8477e-03, 3.9528e-01,
3.8471e-02, 3.0745e-02, 2.8446e-02,
1.0625e-02, -2.4006e-01, -1.2490e-01,
-1.3002e-01, 2.0025e-01, 4.7618e-02,
-3.9705e-02, -1.2017e-02, -9.8790e-02,
-1.2798e-02, -2.7540e-01, -1.5138e-01,
-1.0290e-01, 5.0112e-02, -1.7391e-01,
-9.7079e-02, -2.2350e-03, -5.9211e-02,
-2.4728e-01, 4.3353e-01, -1.9306e-01,
-1.8039e-01, 1.2689e-01, 5.2103e-02,
-4.5547e-39, -7.8040e-39, 4.1196e-39,
1.5214e-39, 9.3494e-40, -3.9058e-39,
7.8718e-39, 7.1728e-39, 5.3609e-39
}
,
{
-9.4505e-02, -7.0477e-02, -1.5792e-04,
-2.3475e-01, 5.8849e-02, -6.8161e-02,
7.0658e-03, -1.0276e-01, 7.2471e-02,
-7.3820e-03, -3.0740e-02, -1.1131e-01,
2.8429e-02, -3.5750e-01, -8.4683e-02,
-5.0210e-02, -3.1096e-03, -2.3730e-02,
4.5756e-02, -3.6724e-01, -7.6317e-02,
3.8467e-01, 5.5354e-02, 1.6943e-01,
-4.9403e-02, 7.4709e-02, -3.0550e-02,
-7.5324e-03, -1.6910e-01, -1.6103e-01,
4.6314e-02, 1.2912e-01, -3.0488e-02,
2.6388e-02, 5.6925e-02, 6.4396e-02,
3.7748e-03, -2.1310e-02, 1.1410e-01,
-7.0164e-03, 1.8228e-02, -2.5920e-01,
6.8416e-02, 1.3998e-01, 1.3290e-01,
-3.8861e-02, 8.9898e-02, -3.6631e-03,
3.5528e-02, 1.1249e-01, 3.7018e-02,
-6.2334e-02, -4.8470e-02, -4.4094e-02,
3.1574e-02, -1.2162e-01, 1.9669e-01,
-4.6605e-03, 1.1887e-02, -1.1958e-01,
-1.0736e-01, 6.0131e-02, -1.2829e-02,
2.1305e-01, -8.4750e-02, -2.7028e-02,
-3.0351e-01, -6.4246e-03, -7.9128e-02,
1.3081e-01, 9.5878e-02, 1.6193e-02,
-5.8335e-02, -5.5968e-02, -2.6284e-03,
-7.2218e-02, -1.1661e-02, 1.9413e-03,
-1.6043e-01, 1.1388e-01, -3.6473e-02,
-2.4077e-02, 1.2210e-01, 1.5531e-02,
1.5074e-01, -4.5545e-01, 6.1004e-02,
-6.3948e-02, 3.9804e-02, -4.8822e-04,
1.3135e-01, 9.2392e-02, 8.8914e-02,
1.2941e-01, -3.6052e-01, 3.9571e-02,
-2.4838e-02, 7.0425e-02, -1.9016e-02,
2.7629e-02, -7.0648e-02, -2.6838e-02,
-2.1844e-02, -9.6184e-02, -3.3611e-02,
8.5938e-02, 5.2663e-02, 2.2938e-02,
-6.9909e-03, -3.9627e-03, -6.5162e-02,
-4.9296e-03, -4.0383e-02, 6.7670e-01,
1.5251e-02, 2.1000e-01, -1.9137e-01,
2.2825e-02, 1.6640e-02, 3.8147e-02,
7.1902e-02, -4.9821e-02, -6.5592e-03,
1.5826e-02, 2.1626e-02, 1.1646e-02,
1.5180e-02, 1.5664e-01, 9.8696e-03,
-7.2901e-02, -2.1818e-01, 9.2465e-02,
6.4349e-02, 6.0290e-02, -2.1094e-02,
2.0633e-02, 4.8808e-02, 1.4080e-02,
4.8083e-02, -1.5979e-01, -5.3634e-02,
6.5004e-02, 7.0317e-02, 1.9117e-02,
-4.3048e-02, 5.9627e-02, -1.5068e-02,
1.8861e-01, -2.6868e-01, 1.2789e-03,
1.1273e-01, -2.7796e-01, 4.9841e-02,
4.9008e-03, 1.8241e-02, 4.3449e-02,
2.1420e-02, -1.0299e-01, -1.6235e-01,
-1.9300e-02, -1.5121e-02, 2.0616e-03,
-2.7591e-01, 3.9622e-02, -5.0492e-02,
1.1866e-01, 5.5502e-01, -2.3622e-02,
-6.1204e-03, -7.4778e-03, 6.7961e-03,
2.4215e-02, 2.1643e-03, 1.1442e-01,
7.5326e-02, 1.4455e-01, 8.0497e-02,
6.6115e-02, 2.9762e-02, 2.8680e-02,
3.7784e-03, -2.2769e-02, 2.4529e-02,
-1.1441e-02, 9.8463e-02, -1.2761e-02,
1.0642e-02, 5.2871e-02, 1.9650e-01,
-2.2225e-02, 3.1504e-02, 8.5645e-03,
4.9125e-02, 1.4439e-01, 8.4573e-02,
1.0103e-02, 1.9097e-02, 4.5579e-03,
-2.5773e-02, -4.0984e-02, -1.5402e-01,
5.3050e-02, 1.5509e-01, -1.9040e-01,
3.7700e-02, 1.0632e-01, -2.2520e-02,
-5.6582e-02, -4.6040e-02, -5.7562e-03,
-3.4924e-01, 3.2933e-01, 5.5211e-02,
2.3230e-02, 8.5108e-02, 3.7448e-02,
1.4266e-02, -7.2016e-02, 4.5252e-03,
-7.0246e-02, 3.9142e-01, -1.9216e-02,
2.0536e-01, -3.5615e-01, 3.8009e-02,
1.2252e-02, -5.7966e-02, 9.2672e-02,
2.4225e-02, -1.0186e-01, -1.4219e-01,
-2.8815e-02, 1.3088e-02, -2.6031e-03,
-6.2341e-02, -1.1216e-01, -7.2122e-02,
1.1812e-01, 4.3493e-01, 4.3593e-02,
-1.3524e-02, 4.8679e-03, -1.0598e-02,
3.4904e-02, 5.5813e-02, 4.6811e-02,
8.0928e-02, 7.6607e-02, 6.3968e-02,
5.4647e-02, 2.8693e-02, 2.1957e-02,
-8.2725e-03, 5.4668e-02, -3.0533e-02,
-9.3953e-03, 1.5874e-01, -3.6093e-01,
5.6412e-03, 1.8977e-02, 2.0088e-01,
-1.9414e-02, 1.9088e-02, 1.4504e-02,
5.8462e-02, 6.2645e-02, 4.9884e-02,
6.6913e-03, 4.3639e-02, 1.5139e-02,
-2.1897e-02, -1.1436e-01, -5.0838e-02,
7.1176e-02, 8.4667e-02, -1.4480e-01,
3.7676e-02, 1.0840e-01, -2.6417e-02,
-4.7584e-02, -4.0524e-02, 6.3032e-03,
-2.4822e-01, 2.4635e-01, 5.5942e-03,
-1.3347e-02, 1.0515e-01, 4.2549e-02,
-1.2380e-01, 4.1074e-02, 1.2608e-02,
-1.2042e-01, 2.9516e-01, 2.8380e-03,
5.1930e-01, -1.6498e-01, 5.7152e-02,
-6.5519e-02, 1.1001e-01, 2.8943e-02,
1.0854e-01, -6.0107e-02, -1.6730e-01,
-4.4417e-02, 3.4347e-02, -3.3756e-02,
2.0694e-01, 3.3047e-01, -9.4497e-02,
-2.1977e-01, 4.6614e-02, 1.2201e-01,
-2.9541e-02, 1.8900e-01, -1.8391e-01,
2.0064e-02, -3.2480e-02, -8.9041e-03,
-5.6385e-02, -6.4531e-02, 1.2879e-02,
-3.2499e-02, 1.0883e-02, 7.3564e-03,
1.9828e-02, -2.3278e-01, -4.3789e-03,
9.7669e-02, 1.3008e-01, -1.0405e-01,
2.2618e-02, -2.5495e-01, -1.0718e-01,
4.3524e-02, -7.3127e-02, 8.2424e-02,
-5.0193e-02, 4.0634e-03, 4.0696e-02,
2.7419e-02, 1.8353e-01, 9.2117e-02,
-7.4918e-02, 1.0602e-01, -3.4752e-02,
-1.3331e-01, -2.9583e-02, -5.2197e-03,
-3.7852e-02, 1.5998e-01, 1.5078e-03,
-5.6512e-02, 1.3378e-01, 1.4512e-02,
4.5255e-02, 2.4702e-01, -2.4848e-02,
-1.7526e-01, 1.5532e-01, 8.6686e-02,
3.1486e-02, -2.3247e-02, 9.7320e-03,
-5.2106e-01, 4.7937e-02, 4.1614e-02,
5.5436e-02, -2.0432e-01, 1.2444e-02,
-5.6792e-02, -5.5632e-02, 5.7612e-02,
-6.0248e-04, 4.9770e-02, -6.7956e-02,
1.3389e-02, -9.4141e-03, -7.3497e-03,
-4.6361e-01, 2.7450e-01, -8.2210e-02,
-2.6737e-01, -6.6114e-02, 6.3568e-02,
1.6910e-02, 1.4456e-01, -9.0081e-02,
8.8278e-03, 2.1776e-02, 8.7710e-03,
-2.3378e-02, -4.3907e-02, -3.6751e-02,
-2.4694e-03, -6.0419e-03, 3.0840e-02,
-1.6968e-02, -8.2266e-02, -1.0049e-01,
3.4429e-02, 1.0960e-01, 3.8355e-01,
-4.0301e-04, -3.1089e-02, -2.1373e-02,
-2.4172e-02, 4.6432e-02, 8.0742e-03,
-2.3134e-02, 1.7789e-02, 2.7136e-02,
3.0729e-02, 6.9008e-03, 1.2822e-02,
3.5043e-02, -6.1749e-02, -1.2565e-02,
-1.0354e-02, -2.6515e-03, 4.5632e-03,
-5.9818e-02, -9.7686e-04, -6.6467e-03,
-5.0833e-01, 1.8474e-02, 1.3598e-02,
3.6287e-01, 1.3698e-01, -1.2806e-02,
-2.8618e-02, -2.9128e-02, 2.9855e-02,
8.1243e-02, 4.7414e-02, -4.7434e-02,
-3.3738e-02, -3.4926e-01, 1.7786e-02,
1.0056e-01, -5.7937e-02, -1.8308e-02,
1.8214e-02, -1.9519e-01, 2.2152e-02,
-7.3543e-02, 2.0786e-01, -5.8196e-02,
3.9396e-02, -4.5349e-02, 1.5748e-02,
-5.4604e-03, 4.5777e-01, 1.7295e-01,
-2.0570e-01, -3.0970e-01, -1.9075e-01,
7.6751e-02, -1.3099e-01, 6.1278e-02,
6.0222e-02, 5.4418e-02, 1.2259e-01,
3.2160e-02, 8.5146e-03, 3.4578e-02,
-5.4391e-02, -2.5285e-02, 1.0251e-02,
-3.2763e-02, 7.9163e-02, -7.5136e-02,
1.8545e-02, -2.1972e-02, 1.3887e+00,
-1.2402e-03, -2.5679e-01, 7.2392e-02,
4.9692e-03, 1.7034e-02, 4.7043e-02,
1.2093e-02, -3.1230e-02, -8.2613e-03,
-7.8701e-03, -2.3516e-03, -7.2487e-04,
6.8495e-02, -5.2837e-02, -2.2482e-01,
1.3259e-02, 4.8009e-01, -4.0940e-02,
-4.1547e-02, -2.8753e-02, -5.2579e-03,
-1.7152e-01, -3.3676e-02, 1.5080e-02,
8.6014e-02, 7.9239e-02, 4.2196e-02,
-9.2870e-02, -1.5913e-02, -6.5804e-03,
4.0364e-02, 2.4914e-02, -1.4638e-02,
8.8705e-03, 2.8037e-01, 3.9890e-02,
1.1638e-01, 2.9467e-01, -4.3518e-03,
7.1091e-02, -2.2378e-01, 4.7315e-02,
3.8006e-02, -2.0246e-01, -3.8679e-02,
-5.8004e-02, 5.8991e-02, -6.2149e-03,
-1.3034e-01, 1.5540e-01, -5.2558e-02,
8.1594e-02, 3.5570e-01, 2.1220e-02,
1.4977e-02, 2.4493e-03, -4.0627e-02,
1.1402e-01, 6.6962e-02, 1.1150e-01,
1.1824e-01, 1.1492e-01, 1.1219e-01,
6.6067e-02, 6.9639e-02, -8.1836e-02,
-2.7144e-02, 1.4677e-01, -5.9261e-02,
4.4573e-03, 2.6235e-01, -7.4379e-01,
-8.3569e-03, 9.4465e-02, -6.5653e-03,
2.1095e-02, -1.8853e-02, 6.7972e-02,
1.2957e-01, 3.0122e-02, -1.0061e-02,
-3.4832e-02, 8.5404e-02, 5.7663e-02,
-5.0400e-02, -1.2050e-01, -2.3344e-01,
1.4977e-01, 7.8806e-02, 6.0771e-03,
5.6483e-02, 6.3927e-02, -5.8376e-03,
-2.8124e-01, 5.2581e-02, -1.3918e-04,
-1.4341e-01, 3.6558e-01, 4.7332e-02,
-3.9089e-02, 8.4188e-02, 2.7058e-02
}
};
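// Per-layer bias terms for the HDNL2 network: 8 layers x 8 channels,
// matching the eight weight blocks above. Magnitudes on the order of
// 1e-38 and below are subnormal floats left over from training and
// act as zero in the convolution.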
static __device__ __constant__ const float HDNL2biasL[8][8] =
{
{
7.2678e-02, 8.5350e-03, 5.0400e-02, 2.6268e-02, 6.2434e-02, 1.0483e-01, -7.1650e-39, 1.0062e-01
}
,
{
-4.9844e-39, -1.8567e-39, 6.0627e-04, -1.9234e-38, 1.8331e-02, -1.1364e-01, -8.3962e-03, -1.7372e-04
}
,
{
-0.0091, -0.0055, 0.0237, 0.0093, -0.0479, 0.0188, -0.0034, 0.0399
}
,
{
6.5694e-03, -2.2259e-01, -1.1226e-02, -8.0327e-02, -1.0615e-36, 1.0402e-02, 7.6246e-03, -6.5940e-02
}
,
{
5.0711e-02, 7.1911e-02, 2.5293e-02, -1.5608e-02, 5.3835e-02, -1.6967e-38, 2.2243e-02, 3.2742e-02
}
,
{
1.5629e-02, 2.9703e-02, 2.6412e-02, 1.2301e-02, 1.8654e-01, -7.2260e-03, 2.4613e-02, -3.1853e-38
}
,
{
-0.0030, -0.0123, 0.0348, 0.0277, -0.0152, 0.0005, -0.0124, -0.0209
}
,
{
7.4856e-03, 7.2931e-04, 8.3015e-03, 6.4820e-03, 2.4008e-04, 7.0377e-06, 1.7948e-03, 8.9869e-03
}
};
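// Final 4 x 8 projection for HDNL2: four output values per pixel drawn
// from the eight feature channels, presumably the 2x2 sub-pixel layer
// that assembles the upscaled output.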
static __device__ __constant__ const float HDNL2kernelsL10[4 * 8] =
{
0.4240, 0.4165,
0.1648, 0.1909,
-0.0985, -0.4455,
0.4639, -0.0533,
-0.1368, 0.4413,
0.2539, 0.3294,
0.2458, -0.3256,
-0.0479, 0.3200,
-0.3977, -0.0422,
-0.2736, 0.1053,
0.3902, 0.0594,
-0.0721, -0.2988,
0.0495, 0.1309,
-0.1703, 0.0033,
0.3061, 0.1827,
0.2443, -0.1259
};
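// HDNL3 network, layer 1: eight 3x3 filters (9 taps each) applied to the
// single input plane.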
static __device__ __constant__ const float HDNL3kernelsL1[9 * 8] =
{
-0.0461, 0.1274, 0.2976,
-0.0393, -0.1251, 0.2527,
0.0791, 0.0600, -0.0303,
-0.0520, -0.5039, -0.3305,
-0.0115, 0.0456, 0.4370,
0.0601, 0.0780, 0.3106,
-0.0017, -0.0018, -0.0017,
-0.0017, -0.0018, -0.0018,
-0.0017, -0.0017, -0.0017,
0.2666, 0.1687, 0.2303,
-0.1901, 0.3825, 0.3024,
0.1811, 0.0581, 0.2080,
-0.1246, 0.0155, -0.4075,
0.1156, 0.5929, 0.1449,
-0.1080, -0.0171, -0.0516,
-0.0817, 0.2247, 0.0472,
0.0394, 0.1085, 0.1435,
-0.0480, -0.0135, -0.0606,
-0.0083, 0.2045, 0.1056,
-0.2239, 0.2823, -0.1926,
0.2581, 0.1362, -0.1914,
-0.0833, 0.0702, 0.0234,
0.3616, 0.3789, -0.1840,
0.0128, 0.1347, -0.0187
};
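// Bias for the eight layer-1 filters above.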
static __device__ __constant__ const float HDNL3biasL1[8] =
{
-0.1329, -0.0431, -0.0031, -0.0129, 0.2294, -0.2595, -0.2370, -0.0499
};
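// --- Illustrative sketch (added commentary, not original code) ---
// One plausible way the flat layer-1 tables above are consumed: a 3x3
// window `win` (row-major) is convolved with filter `f`, biased, and
// passed through a ReLU. The filter-major indexing and the ReLU
// activation are assumptions inferred from the array shapes, not
// confirmed elsewhere in this file.
static __device__ __forceinline__ float HDNL3ConvL1Sketch(const float win[9], int f)
{
    float s = HDNL3biasL1[f];
    for (int i = 0; i < 9; ++i)      // accumulate the 9 taps of filter f
        s += HDNL3kernelsL1[f * 9 + i] * win[i];
    return fmaxf(s, 0.0f);           // assumed ReLU nonlinearity
}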
static __device__ const float HDNL3kernelsL[8][9 * 8 * 8] =
{
{
1.4090e-01, -1.8985e-02, -6.8589e-02,
6.6491e-02, 1.4360e-02, 8.5223e-02,
1.8782e-01, 9.8042e-02, -3.4558e-02,
2.5606e-01, 2.2027e-01, 2.7603e-01,
1.9424e-01, 3.4537e-02, 9.5975e-02,
1.1223e-02, -4.3377e-01, -1.4760e-01,
-3.4293e-40, -5.5421e-40, -4.4763e-41,
-6.3322e-40, -3.1495e-40, -7.8264e-41,
-1.5375e-40, -3.3656e-40, 5.2441e-40,
1.2413e-01, 1.5682e-01, 1.1465e-01,
1.6683e-02, 7.8382e-02, 1.0110e-01,
1.4902e-01, 1.3608e-01, 1.1674e-01,
-6.5160e-02, 7.7748e-02, 2.1773e-02,
2.0652e-02, 2.7245e-01, 1.0297e-01,
-2.0953e-02, 6.1685e-02, 4.4128e-02,
6.1538e-02, -1.9746e-02, -1.2785e-02,
2.5931e-02, 1.2740e-01, 9.0033e-02,
8.6448e-02, 2.0684e-01, 9.8063e-02,
-7.8384e-03, 6.3277e-02, 7.6751e-03,
3.5956e-02, 1.0555e-01, 4.2728e-02,
7.1578e-02, 1.3253e-01, 1.1171e-01,
-2.7538e-02, 1.5836e-01, 1.0014e-01,
-4.9113e-02, 1.6911e-01, 2.7329e-01,
7.9170e-03, 9.5440e-02, 1.3922e-01,
8.0151e-02, 4.3438e-02, 5.5314e-02,
3.4896e-02, 1.6816e-01, -4.5783e-03,
-1.4579e-03, 2.0493e-01, 2.6238e-02,
2.6499e-02, 3.9490e-01, -1.1582e-02,
3.5790e-01, 1.4317e-01, -2.1775e-01,
4.1794e-03, -3.2513e-01, -1.6729e-01,
3.4040e-41, -6.2960e-42, -1.0067e-40,
5.5978e-41, -1.2353e-40, -1.1347e-40,
5.4572e-40, -6.4384e-40, -4.1234e-40,
-9.3690e-02, 1.7765e-01, 1.1275e-01,
9.1159e-03, 1.7375e-01, 1.1427e-01,
-7.8385e-02, 1.5658e-01, -3.8399e-02,
-1.0756e-01, 5.9943e-02, -6.7273e-02,
-1.1117e-01, 1.5267e-01, 1.1563e-01,
-1.2964e-01, -3.8604e-02, -2.4532e-02,
1.6324e-02, 1.3112e-01, 6.1679e-03,
-7.7703e-03, 2.6311e-01, 8.9427e-02,
-2.8948e-02, 1.9341e-01, 4.4339e-02,
6.4559e-03, -6.8885e-02, 1.1481e-01,
-1.0665e-01, 3.8613e-02, 7.0410e-02,
-6.1680e-02, -1.7374e-02, 9.5475e-03,
-4.0081e-02, -3.1549e-02, 2.8311e-01,
-1.2178e-01, -1.3848e-01, 1.7416e-01,
-8.1756e-02, -1.7718e-01, 7.9533e-02,
-3.1299e-03, -3.2305e-03, -3.2094e-03,
-3.1548e-03, -3.2553e-03, -3.2453e-03,
-3.1459e-03, -3.2278e-03, -3.2076e-03,
-3.6554e-05, -3.6715e-05, -3.1284e-05,
-1.4927e-05, -1.4357e-05, -1.2185e-05,
-1.5771e-09, -1.1439e-09, -6.4952e-10,
3.7723e-40, 4.9166e-40, -2.1946e-40,
-4.7599e-40, -4.3356e-40, -8.3928e-41,
2.6127e-40, 4.8634e-40, 2.7720e-40,
-5.4972e-03, -5.6409e-03, -5.6919e-03,
-5.5818e-03, -5.7079e-03, -5.7542e-03,
-5.6338e-03, -5.7437e-03, -5.7600e-03,
-3.7940e-03, -3.8853e-03, -3.8693e-03,
-3.8995e-03, -3.9616e-03, -3.8945e-03,
-3.8438e-03, -3.9156e-03, -3.8269e-03,
-7.2342e-05, -7.8682e-05, -4.7701e-05,
-1.1126e-04, -1.1918e-04, -7.8931e-05,
-1.1644e-04, -1.2418e-04, -8.2350e-05,
-2.3881e-04, -3.7971e-04, -3.9448e-04,
-2.4112e-04, -3.8395e-04, -4.0189e-04,
-2.3451e-04, -3.7525e-04, -3.9222e-04,
-3.9853e-03, -4.0748e-03, -4.1134e-03,
-4.0685e-03, -4.1456e-03, -4.1548e-03,
-4.0547e-03, -4.1388e-03, -4.1357e-03,
5.3008e-02, 2.2252e-02, -7.1158e-02,
-6.6411e-02, -3.0015e-02, -2.2526e-02,
1.2259e-01, -6.2488e-02, 5.6190e-02,
1.5981e-02, -7.6832e-02, 1.7908e-02,
2.7618e-01, 5.4054e-02, 8.7282e-02,
1.5212e-02, -1.1097e-01, -2.2265e-02,
-6.8532e-41, -6.0539e-40, 4.6269e-40,
-2.9221e-40, -3.8468e-40, -4.6656e-40,
6.4572e-40, -6.1625e-40, 6.4545e-40,
3.5920e-02, 9.0955e-02, -1.7626e-02,
4.7826e-02, 1.8832e-01, -4.4043e-02,
-3.8405e-02, 5.9176e-02, 6.8182e-02,
3.7657e-03, 2.6441e-02, -2.5585e-01,
1.0969e-01, 2.3914e-01, 3.5120e-02,
-1.6252e-01, 3.4371e-02, -2.7501e-01,
4.9289e-02, 2.2088e-02, -1.4588e-02,
1.6384e-01, -8.1421e-03, -6.9613e-02,
1.0820e-01, 1.1137e-01, 7.2648e-03,
1.5243e-01, 1.3659e-01, 2.7553e-02,
1.3966e-01, 1.1019e-01, 1.9817e-02,
1.1420e-01, -5.1386e-03, 6.8617e-03,
-1.3264e-02, 2.1508e-01, 4.8430e-02,
5.1149e-02, 2.9165e-01, 2.8077e-01,
2.9288e-03, 9.0611e-02, 8.1538e-02,
-1.1812e-01, 1.5603e-02, 1.1571e-01,
-3.4958e-02, -1.6688e-03, -4.6619e-02,
-1.0417e-02, -3.1802e-02, 1.8357e-02,
1.1064e-01, 1.8397e-01, 4.8449e-02,
-8.3336e-03, 1.6029e-01, 3.9490e-02,
-4.0959e-01, -2.6134e-01, 2.0766e-02,
6.6073e-41, -6.7490e-40, -5.1131e-41,
-4.3320e-41, -3.7194e-40, 2.0674e-40,
-5.2359e-40, -3.4006e-40, -4.9257e-40,
-4.7260e-02, 2.8518e-03, -2.7764e-01,
6.9182e-03, 1.3938e-01, -1.3162e-01,
-6.0901e-03, 1.0339e-01, 6.0419e-02,
-1.4449e-01, -3.2043e-02, -9.1466e-02,
-1.4022e-02, 3.1703e-01, 5.8166e-02,
-1.5243e-02, 1.4521e-01, 2.0790e-04,
-1.0255e-01, -7.8766e-02, -1.2395e-01,
7.9894e-03, 3.7079e-03, -3.2134e-02,
1.1663e-01, 1.4808e-01, 2.0431e-01,
7.4026e-02, 6.9632e-02, 1.7156e-01,
-3.0385e-02, 2.3218e-01, 7.3855e-02,
-8.8530e-02, -5.9224e-02, 2.3431e-02,
1.4596e-02, 3.2442e-02, -1.1308e-01,
-6.3734e-02, 2.5270e-01, 7.8081e-02,
1.0468e-02, 1.5473e-01, 3.8676e-02,
-1.0842e-01, 8.6778e-03, 1.4985e-01,
8.1757e-03, -8.2109e-02, 8.5471e-02,
-2.1437e-01, -6.1173e-02, 4.8163e-02,
2.8965e-01, 1.9748e-01, 4.2651e-02,
1.8196e-01, 3.3932e-01, 3.9594e-01,
3.9657e-01, 4.2167e-01, 2.9290e-01,
7.4011e-41, 6.5220e-40, -5.9885e-40,
7.4011e-41, 6.2047e-40, -7.1533e-40,
4.1950e-40, -1.1886e-40, -5.9922e-40,
1.9662e-01, 2.1402e-01, 3.1041e-02,
-1.1079e-01, 1.3361e-01, -2.1608e-01,
-1.7962e-01, -8.0576e-02, -3.1277e-01,
1.0620e-02, 2.4024e-01, 1.0657e-01,
-7.9906e-05, 2.8760e-01, 4.1231e-02,
-1.3261e-02, -1.0868e-01, -1.1267e-01,
-1.0659e-02, -2.6051e-02, -4.5389e-02,
5.8261e-02, 4.0288e-02, 6.7050e-02,
-2.6462e-01, -1.7846e-01, -1.0002e-01,
-6.2904e-02, 1.5275e-01, 4.4282e-03,
1.4446e-01, 1.1814e-01, -8.0349e-02,
2.0331e-02, 3.3014e-02, 1.2710e-01,
1.6084e-01, 3.8819e-01, 1.0854e-01,
-6.8126e-03, 3.5673e-01, 1.8938e-01,
-1.1660e-01, -5.7694e-02, -2.9194e-01,
1.2775e-02, -3.2769e-02, 1.7228e-02,
1.8324e-01, 1.1983e-01, -1.6944e-02,
1.0593e-01, 1.3451e-01, 5.2536e-02,
1.9147e-01, 1.3875e-01, 1.0298e-01,
-2.0871e-01, -1.7197e-01, 1.1342e-01,
-1.7581e-01, 4.0972e-02, 2.9796e-01,
3.2588e-40, -4.3663e-40, -2.6518e-40,
3.2588e-40, -4.3663e-40, -2.6518e-40,
4.1600e-40, -4.4350e-40, -4.8744e-41,
3.7289e-02, 8.1769e-03, 1.7059e-02,
3.7735e-02, 6.6571e-02, -6.6137e-02,
-5.8890e-02, -7.7019e-03, -6.2128e-02,
-4.0751e-02, 1.1710e-01, -1.1586e-01,
-1.2999e-01, -1.6384e-02, -2.1858e-01,
-2.8028e-01, -6.0443e-02, -1.1880e-01,
1.8152e-01, 1.5364e-01, 1.1781e-01,
2.9010e-01, 2.4612e-01, 1.3170e-01,
1.9022e-01, 1.8117e-01, 1.6483e-01,
9.3342e-02, 2.6607e-01, 1.4679e-01,
1.6729e-01, 2.5374e-01, 1.1954e-01,
6.3258e-02, 1.0557e-01, 6.7221e-02,
-5.2017e-02, 1.9628e-01, 1.7243e-01,
-3.2667e-02, 1.5756e-01, 1.9347e-01,
-9.5252e-02, -3.7525e-02, -3.4543e-04,
-4.9759e-02, 4.0383e-02, -2.0231e-02,
-1.1776e-01, 3.4182e-02, 3.6720e-02,
-1.4822e-02, -4.1658e-02, -1.3729e-02,
-1.9215e-02, 2.4427e-02, -9.0638e-02,
-1.4438e-01, -2.1785e-01, -5.1789e-02,
-2.0279e-01, -3.3918e-01, -1.6871e-01,
6.1262e-41, 2.4066e-40, 6.6851e-40,
5.3430e-40, -3.2335e-40, -3.7400e-40,
-6.3256e-40, -4.7491e-40, 2.2854e-40,
-6.8701e-03, -1.4849e-02, 8.6332e-02,
1.1686e-01, 1.8346e-01, 1.8797e-01,
-2.3251e-02, 7.3973e-02, 1.0532e-01,
-6.1838e-02, 5.6667e-02, 8.1584e-02,
-3.8900e-02, 7.0927e-02, 9.5606e-02,
-4.5098e-02, -1.0829e-01, -1.2224e-01,
3.5047e-03, 3.2898e-02, 3.5622e-02,
1.6170e-02, 4.3721e-02, 9.7496e-02,
2.3445e-03, 6.0417e-02, 1.3482e-01,
6.0570e-02, -5.7139e-03, -1.0883e-03,
2.2701e-02, -2.9113e-02, 7.9178e-03,
8.1214e-02, -4.1408e-02, 1.3616e-02,
-4.7985e-02, 1.0304e-02, -3.3236e-02,
-1.6334e-02, -8.1538e-02, 1.8629e-02,
-9.3720e-02, -1.2920e-01, -4.0836e-02
}
,
{
1.0443e-01, 1.5461e-01, -1.4743e-01,
1.6716e-01, 1.0532e-01, -2.3088e-01,
1.0218e-01, 1.2393e-01, -9.6646e-02,
1.7659e-01, -7.3279e-02, 1.9627e-02,
1.7721e-01, -1.4329e-01, -1.2533e-01,
1.6551e-01, -3.4616e-01, 9.5618e-02,
4.5827e-09, 9.3413e-09, 1.7015e-08,
1.2245e-08, 9.9727e-09, 6.7108e-09,
1.9612e-07, 3.9479e-08, 1.1537e-09,
2.2127e-02, 9.2715e-02, -1.2150e-01,
7.5652e-02, 1.1548e-01, -1.2420e-01,
-1.0693e-03, -7.2839e-02, -1.9664e-01,
1.4466e-01, -1.8552e-03, -1.3575e-01,
2.0699e-01, 8.0396e-02, -1.9651e-01,
-4.7075e-02, -5.1259e-02, -8.2593e-02,
-2.2385e-01, 3.0066e-03, -2.2659e-02,
6.1827e-02, 2.5331e-02, -5.3898e-02,
2.7091e-01, 1.0991e-01, -3.3600e-01,
-8.9499e-02, -9.3821e-03, 2.2675e-02,
1.1213e-01, 1.3276e-01, 2.0368e-02,
6.5408e-02, 4.1598e-02, -4.7917e-02,
6.0740e-03, 1.2236e-04, -1.0659e-01,
-1.8072e-02, -9.1082e-02, -9.0414e-02,
4.9052e-02, -1.4298e-01, -3.9721e-02,
1.1840e-01, 2.2503e-01, 2.4587e-02,
9.3023e-02, 6.9650e-02, 1.6798e-01,
-1.5640e-03, 1.6300e-02, 6.3585e-02,
1.4431e-01, 3.7885e-02, 1.6692e-02,
1.7345e-01, 7.2315e-02, 1.8942e-02,
1.1081e-01, 8.2973e-02, -9.7717e-02,
-5.2264e-03, -5.2641e-03, -5.2727e-03,
-5.2809e-03, -5.3125e-03, -5.3153e-03,
-5.2915e-03, -5.3251e-03, -5.3231e-03,
6.0008e-02, 2.0268e-01, 1.3396e-01,
-2.5202e-03, -1.7750e-02, -1.2019e-02,
1.1806e-01, -2.2306e-02, 3.6464e-02,
7.9324e-02, 3.1883e-02, 1.5483e-02,
-4.3537e-02, 1.2204e-02, 1.8905e-02,
-8.1581e-02, -1.1307e-01, -6.0718e-02,
-2.4865e-01, -1.0199e-01, 1.9886e-02,
-1.0519e-02, 6.9972e-02, 4.8012e-02,
-1.5282e-02, 1.1979e-01, 8.7968e-02,
-3.6752e-02, 1.9523e-02, 7.1321e-02,
-5.8295e-02, 5.3242e-02, 1.2773e-01,
-7.9671e-02, 8.3249e-04, 7.4904e-02,
1.1792e-01, 2.2135e-03, -9.0963e-03,
-2.8356e-03, -4.2661e-02, 6.9497e-02,
9.3561e-02, 1.0475e-01, 5.4745e-02,
-8.5901e-02, -2.1969e-01, -1.5572e-01,
3.6473e-02, 1.1097e-01, -2.6830e-02,
1.2199e-02, 1.8917e-01, 1.1906e-01,
1.0664e-01, -2.7005e-01, 1.5492e-01,
-4.1771e-02, -1.6580e-01, 2.9234e-02,
-1.9854e-02, 2.1436e-01, -1.1100e-01,
4.5382e-04, 4.2085e-04, 5.6852e-04,
3.4951e-04, 3.7354e-04, 3.2786e-04,
2.0790e-04, 2.8606e-04, 3.2415e-04,
-1.5500e-02, 2.2865e-02, -3.0070e-01,
1.8467e-01, 2.4899e-01, 1.4812e-02,
-1.2318e-01, 2.3175e-01, 7.2244e-02,
1.6713e-01, 1.9089e-02, -2.7494e-01,
1.0202e-01, 2.9200e-01, -3.6055e-03,
1.3265e-01, 2.2551e-01, 1.9897e-01,
-3.9474e-02, 1.6262e-01, 1.6726e-01,
-8.6222e-02, 2.0573e-01, -7.3247e-01,
-9.5391e-02, 3.8933e-01, 1.5861e-01,
-1.2202e-01, -6.4735e-02, -1.1762e-01,
-2.2427e-02, -1.9171e-01, -1.6092e-01,
3.2356e-01, -2.2234e-01, -1.3743e-01,
-1.1493e-01, -2.4936e-02, 2.9212e-02,
-9.8112e-02, -1.8021e-02, -1.0507e-01,
-1.0168e-01, 1.1759e-01, -9.8203e-02,
-2.8871e-02, 1.3249e-01, 7.8378e-02,
-1.1012e-01, -4.0596e-02, 5.4202e-02,
4.9022e-02, -1.1744e-01, 9.8888e-02,
1.3343e-02, 1.4358e-01, -8.7142e-02,
1.9952e-01, 3.3708e-02, 2.0721e-02,
2.6527e-02, -2.3822e-01, 2.4706e-01,
-3.2750e-04, -2.8475e-04, -6.3494e-05,
-2.2378e-04, -1.8046e-04, -1.9242e-05,
-4.2124e-05, -2.2062e-05, 4.5500e-07,
1.1692e-01, 4.0366e-01, -1.8709e-02,
8.2700e-02, 1.7884e-01, -1.3520e-01,
3.7758e-02, 3.7048e-02, -2.8109e-01,
-2.3438e-01, 5.9423e-02, -1.7300e-01,
1.0343e-02, 7.2307e-02, -4.3852e-01,
-5.7429e-02, -4.9136e-02, -8.0327e-02,
8.1094e-02, 2.9118e-02, 1.6677e-01,
1.2155e-01, 6.5358e-01, 2.4544e-01,
3.1163e-02, 3.7463e-02, -2.6613e-01,
1.2723e-01, 1.2541e-01, 1.4319e-02,
1.9055e-01, -5.7441e-02, 1.1146e-01,
-1.0690e-02, -1.7567e-01, -1.2238e-01,
-2.0879e-01, -6.5278e-02, -7.9327e-02,
-1.6564e-01, -1.3659e-01, -2.6231e-01,
-3.1916e-01, -2.6553e-01, -9.8647e-02,
-1.0617e-01, 1.2782e-01, -2.1053e-02,
-1.2329e-01, 1.4952e-01, -1.7466e-02,
-1.6969e-01, 3.6980e-02, -6.7732e-02,
-3.1220e-02, 4.0615e-02, -1.5251e-01,
-2.0017e-01, 2.2421e-01, -2.5682e-02,
-6.5873e-02, 1.8346e-01, 1.2982e-02,
1.4021e-06, -1.6929e-05, -8.4696e-05,
1.9580e-05, 2.9943e-06, 3.0084e-06,
2.0769e-04, 1.4661e-05, 2.9503e-06,
-1.4485e-01, 1.8841e-01, -1.7954e-01,
2.1551e-01, 2.2601e-01, -8.6689e-03,
8.6926e-02, -6.8989e-02, -1.2683e-01,
-8.7712e-02, 6.3176e-02, 1.1983e-01,
1.0790e-01, 6.6418e-02, 6.5849e-02,
1.2483e-01, 1.2428e-01, 4.4994e-02,
1.5139e-01, -1.2116e-01, -3.5497e-01,
-6.1889e-02, 3.4088e-01, 1.3148e-01,
-1.6478e-01, 4.4477e-02, -1.1979e-01,
3.8343e-02, 1.7992e-01, 3.6790e-01,
3.0426e-01, 1.1235e-01, 4.9815e-01,
2.6290e-01, 1.9703e-01, 1.5881e-01,
-6.4678e-03, 2.4401e-01, 1.9266e-01,
-1.4089e-01, 1.2323e-01, 4.4340e-02,
-8.8856e-02, 8.4036e-02, -9.8488e-02,
-1.7377e-03, -1.7654e-03, -1.7223e-03,
-1.7651e-03, -1.7919e-03, -1.7491e-03,
-1.7172e-03, -1.7446e-03, -1.7041e-03,
-3.0384e-04, -2.9297e-04, -2.4838e-04,
-3.2961e-04, -3.1678e-04, -2.7009e-04,
-3.1665e-04, -3.0492e-04, -2.6122e-04,
3.7109e-40, -3.7915e-40, -5.2536e-40,
5.8286e-41, -5.6108e-40, 4.3331e-40,
-3.0184e-42, -4.8987e-40, -5.1788e-40,
-4.0457e-04, -4.3257e-04, -4.1616e-04,
-4.2268e-04, -4.5118e-04, -4.3407e-04,
-3.9446e-04, -4.2199e-04, -4.0650e-04,
-1.1253e-16, -1.1328e-14, -2.0489e-14,
-3.0346e-19, -1.7189e-16, -4.5141e-16,
-2.4957e-30, -1.8191e-23, -3.5882e-22,
-3.1610e-36, -1.7544e-24, -2.2187e-21,
-4.2887e-19, -1.5526e-15, -1.5160e-14,
-1.7750e-16, -6.8066e-14, -3.3764e-13,
-6.9570e-24, -5.1139e-23, -2.9335e-23,
-1.9091e-22, -1.0323e-21, -4.5931e-22,
-2.0010e-22, -9.3710e-22, -3.5622e-22,
-2.9470e-04, -2.9081e-04, -2.5958e-04,
-3.2290e-04, -3.1810e-04, -2.8461e-04,
-3.1795e-04, -3.1356e-04, -2.8121e-04,
6.1623e-02, 1.7057e-01, 8.0478e-02,
1.2624e-01, 1.8468e-01, 2.1901e-02,
7.6033e-02, 1.3455e-01, 8.4037e-02,
8.4434e-02, -1.7069e-02, -7.8318e-02,
4.9244e-02, 4.4782e-02, -6.9747e-02,
1.2915e-01, 1.1453e-01, -6.5243e-02,
-5.0985e-03, -5.1407e-03, -5.1687e-03,
-5.1185e-03, -5.1511e-03, -5.1712e-03,
-5.0986e-03, -5.1272e-03, -5.1409e-03,
-1.8186e-02, 6.2680e-02, 3.3235e-02,
1.3398e-02, 1.6497e-01, 4.3523e-02,
-2.4101e-02, 1.3316e-01, 1.8373e-02,
-6.2677e-04, 6.5026e-03, 2.5948e-02,
6.6542e-02, 1.2352e-01, 1.5155e-02,
-8.6237e-02, -2.0907e-02, 1.0237e-02,
-1.7807e-01, -8.6196e-02, -3.2408e-02,
-8.1946e-03, -1.3957e-02, -1.6733e-01,
2.6269e-02, 1.6817e-01, 9.4029e-02,
3.4005e-02, -1.2833e-02, -1.2038e-01,
-4.8950e-02, 3.9857e-02, 1.4048e-02,
-6.4758e-02, 9.9603e-02, 1.0748e-01,
-1.0850e-02, 9.8875e-02, -4.4439e-02,
9.1219e-02, 6.6400e-02, -6.7693e-02,
5.3318e-02, 1.1838e-02, -1.5164e-01,
-5.8568e-02, 1.1249e-01, -3.8286e-02,
-7.1122e-02, 9.5799e-02, 3.8521e-02,
-1.3846e-01, 1.4167e-01, -3.5500e-03,
-1.0343e-01, -3.3025e-02, 3.7186e-02,
-2.0769e-03, 1.3558e-01, -1.3009e-01,
1.0167e-02, 1.5358e-02, -9.8009e-02,
2.4123e-05, -1.1800e-05, -1.4180e-04,
3.5217e-05, -6.3838e-06, -1.2243e-04,
8.5525e-05, 2.1599e-06, -5.3290e-05,
-1.4471e-01, 2.0111e-02, -1.2449e-01,
5.3368e-02, 3.2918e-01, 1.4034e-01,
-1.1833e-01, -1.9225e-02, -1.2658e-01,
-2.6966e-01, 1.1751e-01, 9.7072e-02,
-1.9929e-01, 9.7986e-02, -5.1240e-02,
-9.5073e-02, -6.8070e-02, -2.1318e-01,
9.5305e-02, -4.0551e-02, -1.0936e-01,
5.2687e-02, 4.5340e-01, 2.3531e-01,
-1.3385e-02, 1.5922e-01, -1.8371e-01,
-1.2203e-01, -7.2567e-02, -3.0000e-01,
-3.4356e-02, -1.3471e-01, -9.0995e-02,
-2.5230e-01, -2.4846e-01, -1.8529e-01,
-1.6962e-01, 1.0905e-01, 1.1557e-01,
-1.4405e-01, 8.9191e-02, 1.1715e-01,
-1.3237e-01, 5.2092e-02, -1.2227e-01
}
,
{
2.0013e-01, 2.2105e-01, 1.9196e-01,
6.8158e-02, 1.7154e-01, -8.6677e-02,
9.2652e-02, 1.0789e-01, 1.6745e-01,
-2.9254e-01, -7.6815e-02, 5.8812e-02,
-4.6466e-02, 1.3941e-02, 2.3353e-01,
-1.5033e-01, 7.5167e-02, 1.4433e-01,
2.8008e-02, 3.1625e-01, 3.2877e-02,
-5.8835e-02, -1.7305e-01, -6.1558e-02,
-1.2227e-01, 3.9931e-02, 3.0300e-02,
2.3004e-01, 4.1834e-02, -5.7790e-02,
-2.2861e-01, 2.9314e-01, 1.6884e-01,
-2.8009e-02, 4.7550e-02, -4.4542e-02,
-2.4674e-01, -1.5483e-01, 3.2653e-02,
-2.1574e-01, 3.1083e-01, -1.4025e-03,
1.7354e-02, 5.6417e-02, 1.0844e-01,
-4.2681e-40, 4.5893e-42, -7.4234e-40,
1.7665e-40, 4.0151e-40, 4.6269e-40,
2.5452e-40, -7.0179e-40, -1.2338e-40,
-1.4957e-01, -1.9087e-02, 7.1170e-02,
-1.4435e-01, 8.9560e-02, 1.3879e-01,
-3.6992e-02, 5.9822e-02, 1.9241e-02,
-2.4402e-03, 1.5097e-01, 6.3958e-02,
-1.7630e-01, 3.6009e-01, -2.0383e-01,
-8.5106e-03, 4.0863e-03, -2.7575e-02,
7.8942e-02, -1.8640e-01, -6.7715e-02,
7.2777e-02, -1.3804e-01, -7.0332e-02,
1.5185e-01, -4.3530e-02, 1.4502e-01,
-3.2928e-02, -3.0583e-02, 9.2061e-02,
1.2493e-01, 1.0400e-01, 1.3780e-01,
1.4438e-01, 8.2051e-02, 1.6159e-02,
2.7478e-02, 1.7768e-01, 2.5945e-01,
-3.4662e-01, 2.0330e-03, 8.8118e-02,
-2.9628e-01, -1.3212e-01, -1.8145e-02,
-1.9330e-01, 3.9238e-02, -4.6944e-02,
-1.5668e-01, -5.7104e-02, 1.9558e-01,
6.5305e-02, 5.9933e-02, 7.7337e-02,
-2.4906e-02, -1.1235e-01, 1.3822e-02,
-3.9988e-02, -9.1882e-03, 1.9204e-02,
1.0504e-01, 4.6820e-03, -2.1836e-02,
-2.6953e-40, 2.5334e-40, -1.3028e-40,
1.4110e-41, 5.6841e-40, 3.6368e-40,
-1.1746e-41, -7.0658e-41, -3.9413e-40,
1.5025e-02, 7.4419e-02, 9.5652e-02,
5.0297e-02, 6.6704e-02, 5.7316e-02,
2.5102e-02, 1.1985e-01, 2.6043e-02,
3.3297e-02, -7.7374e-02, -1.1114e-01,
-7.5586e-02, -1.9338e-02, -1.3739e-02,
4.5616e-02, -6.4946e-02, -6.9372e-02,
-7.5874e-03, -1.1141e-01, -2.9135e-02,
-6.9436e-03, -1.4418e-02, 1.6436e-03,
-1.3051e-01, -1.3324e-01, -9.3934e-02,
1.2184e-01, 1.9386e-01, 1.7995e-01,
-2.7452e-02, 9.9736e-02, 1.0020e-01,
-6.3290e-02, -2.1447e-02, -1.7005e-01,
1.3857e-01, 2.3338e-01, 2.5410e-01,
2.3002e-01, 1.9551e-01, 1.4452e-01,
4.7040e-01, 2.2647e-01, 1.5215e-01,
2.6927e-02, -2.1304e-01, -1.4762e-01,
-5.6998e-02, 2.9064e-01, 1.8085e-01,
8.9393e-02, -1.7463e-01, -2.7095e-01,
3.8434e-02, 1.7198e-01, -1.8122e-02,
-1.3857e-01, 1.9418e-01, 1.5019e-01,
-5.6337e-02, -5.3265e-01, 3.2122e-01,
-2.4484e-40, -5.3707e-40, 1.5854e-41,
5.1791e-40, -4.1875e-41, 5.6732e-40,
1.3048e-40, 1.6452e-40, -4.5028e-40,
-3.0692e-02, 1.8569e-01, 2.0327e-01,
-7.4756e-02, -5.1765e-02, 4.2475e-02,
-9.0675e-02, -3.0438e-01, -3.5088e-01,
-1.9129e-02, -1.5663e-03, 4.9895e-02,
-1.9441e-02, 9.3237e-02, 1.2910e-01,
-2.3919e-02, -4.0539e-01, 2.8167e-02,
2.0203e-01, 3.3424e-02, 1.7927e-02,
4.1923e-02, -1.6967e-01, 2.5656e-02,
-1.5869e-01, -1.8727e-01, 2.7860e-03,
-4.0276e-02, -6.7792e-03, 3.3699e-02,
-6.7044e-03, 1.7686e-02, 2.9786e-02,
-1.5623e-02, 3.7904e-02, 2.4737e-02,
-1.2282e-01, -3.6563e-02, 4.1976e-02,
-9.9622e-03, 8.8981e-02, 2.1364e-02,
-8.5668e-02, -1.6803e-01, -4.4974e-02,
1.3164e-01, 4.1294e-01, 1.8897e-01,
2.1991e-01, 1.6247e-02, 1.1569e-01,
-3.0142e-02, 1.4069e-02, 3.6646e-02,
-2.6816e-02, -3.9767e-02, 1.4061e-01,
-1.3603e-01, -2.0649e-01, 7.5837e-02,
-1.6984e-02, -8.3800e-03, 2.3652e-04,
1.5049e-40, 4.6504e-40, 1.3625e-40,
-7.5358e-40, -3.4257e-40, 9.9763e-41,
4.7243e-40, 7.4890e-40, -7.9440e-42,
-5.9692e-02, -2.8047e-02, 2.3795e-02,
-3.5284e-02, 1.1448e-02, 5.0302e-04,
-3.5066e-02, 4.6185e-02, 1.2167e-02,
3.7583e-02, -3.6598e-02, 1.0206e-01,
-9.6229e-02, -1.5977e-01, 4.9157e-02,
3.7293e-02, 5.8766e-02, 1.0448e-02,
1.1490e-01, 1.4459e-01, 8.6936e-02,
2.8609e-01, -4.8108e-02, 9.0023e-02,
6.7941e-02, -5.7148e-03, 1.0021e-01,
7.3816e-02, 7.3794e-02, 8.0970e-03,
2.8307e-02, 3.6635e-03, -1.1769e-01,
4.1374e-02, 3.9933e-02, -4.4292e-02,
5.9423e-02, 1.9009e-01, -2.3735e-01,
-2.6670e-01, 5.8789e-01, -2.0048e-01,
-3.7082e-01, 1.8045e-01, 5.4820e-02,
-6.3567e-01, 2.0098e-01, 1.0653e-01,
-2.5056e-01, 6.5065e-01, -4.0471e-01,
5.4715e-02, 2.4375e-01, -2.7402e-01,
1.5982e-01, 1.0923e-01, 2.1566e-01,
2.0239e-01, -9.0221e-02, -4.4606e-01,
1.0550e-01, 5.4666e-02, -2.7134e-01,
-4.6424e-40, 2.9137e-40, 7.4968e-41,
1.2376e-41, -5.6213e-40, -6.3457e-40,
2.5404e-40, 2.0013e-40, 3.5611e-40,
5.5423e-02, 3.9843e-02, -1.7509e-01,
5.4480e-02, 5.0331e-02, -1.6793e-01,
6.6093e-02, 3.0163e-02, -8.2023e-02,
-1.5490e-01, 1.7457e-01, 2.7832e-01,
1.1482e-01, 2.5759e-01, -2.4199e-01,
-9.3891e-02, 9.1921e-02, -6.4480e-03,
1.9266e-01, 5.2907e-02, 7.0289e-02,
1.3582e-01, 6.4246e-02, 1.4989e-01,
6.2013e-03, -6.8884e-02, 6.8734e-02,
-1.0483e-01, -7.7134e-02, -3.6204e-02,
1.7590e-02, 5.0844e-02, 1.4234e-01,
7.2913e-02, 6.0726e-02, 6.4414e-02,
-8.5021e-02, -1.0621e-03, 5.5851e-02,
2.4666e-01, 6.5652e-02, -1.8180e-02,
1.5225e-01, 1.2928e-01, 3.1578e-03,
1.1468e-01, 1.9544e-01, 6.6637e-02,
6.3430e-02, 2.0542e-01, 7.0876e-02,
3.4779e-02, 1.0037e-02, -2.2134e-02,
-6.9304e-02, 1.1184e-01, -3.7015e-02,
-1.7634e-01, 1.2475e-01, 9.1947e-02,
-6.0550e-02, -1.3904e-01, 7.5192e-02,
-2.2871e-40, 4.7367e-41, -1.0711e-40,
-2.8662e-40, 4.0542e-41, 3.3067e-40,
-4.4395e-41, -7.2684e-41, 1.8695e-40,
-1.6702e-01, -2.6654e-01, 8.7902e-03,
-2.0108e-01, -3.8093e-01, -8.3700e-02,
-7.5433e-02, -2.0689e-01, 2.7951e-02,
2.9938e-03, 1.1378e-01, 7.1598e-02,
-1.6031e-01, 1.3475e-01, 1.5800e-01,
-7.2019e-02, -1.1663e-01, 8.0692e-02,
1.0610e-01, 1.1163e-02, -1.4959e-01,
-1.1576e-01, -8.5645e-02, 4.0414e-02,
5.6245e-02, 1.7056e-01, 2.5734e-01,
-6.1086e-02, -7.0851e-02, 7.6851e-02,
-2.7595e-02, -6.0890e-02, 4.7472e-02,
7.1059e-03, 6.0942e-05, 7.4915e-02,
1.9350e-01, -1.8458e-02, -2.3040e-02,
6.3477e-02, 1.1923e-01, 9.9319e-02,
6.4839e-02, 2.7973e-01, 1.2902e-01,
-1.7829e-01, 5.7083e-03, -6.1680e-03,
-1.1256e-01, -2.7951e-02, -2.1544e-01,
-2.1614e-02, -7.1468e-02, -2.2054e-02,
-8.7543e-02, -1.2982e-01, 1.9386e-01,
-5.7157e-03, -1.0108e-01, 1.4467e-01,
-6.5742e-02, -7.2054e-02, 1.7924e-01,
7.5418e-40, 6.3043e-40, 4.9815e-40,
-1.0952e-40, 3.0327e-40, -2.3848e-40,
4.1302e-40, 2.0150e-40, -1.6509e-40,
-1.3985e-02, -1.0550e-01, 5.8772e-02,
-1.7108e-02, -7.3644e-02, 3.3014e-02,
-1.8224e-03, 2.8931e-03, 9.2762e-02,
4.1531e-02, -1.5139e-01, -1.7773e-01,
9.6548e-02, -1.1914e-01, -4.6536e-02,
8.6754e-02, -4.0057e-03, 1.8983e-01,
1.6545e-01, -4.7311e-02, -7.2455e-03,
3.7567e-01, 1.8883e-01, -7.4325e-02,
-5.8252e-02, -1.3811e-02, -7.0470e-02,
-3.2943e-02, -7.0770e-02, -1.4700e-01,
1.7043e-02, 9.4331e-02, 4.2857e-03,
4.1247e-03, 1.6690e-01, 4.2146e-02,
1.1420e-01, -7.4456e-02, -3.8763e-02,
1.6807e-01, 9.3636e-03, -1.1796e-01,
1.7703e-01, 1.1386e-03, -6.8707e-02,
1.0259e-01, -1.8918e-02, 6.5902e-03,
1.2421e-02, -7.8960e-02, 2.1766e-02,
1.3062e-01, 4.6001e-02, 2.4199e-01,
-1.2955e-02, -1.9329e-01, 5.2074e-03,
5.9446e-02, 1.8832e-01, 2.2094e-01,
-1.0954e-01, -8.1867e-02, -4.3324e-02,
-3.9596e-41, 2.8677e-40, -6.5843e-40,
4.2812e-41, -3.5323e-40, 4.8298e-40,
7.6351e-40, -2.4759e-40, 7.3030e-40,
-1.1284e-01, -8.4171e-02, -1.5935e-01,
-3.2299e-02, 1.5427e-01, 8.9029e-02,
-3.8815e-02, 1.3098e-01, -4.3065e-02,
-2.5276e-01, -1.7018e-01, 9.7901e-02,
1.4218e-01, 3.1236e-01, 2.9636e-01,
-2.3613e-02, -5.5258e-02, -2.0550e-01
}
,
{
0.0333, 0.1145, -0.0922,
0.1185, 0.4533, -0.2015,
-0.0774, 0.1759, -0.0496,
0.0954, -0.0499, 0.0824,
0.1059, 0.0173, -0.0586,
-0.0666, -0.0287, -0.0652,
-0.0558, -0.1362, 0.0015,
0.1277, 0.1020, -0.1369,
0.0020, -0.0103, -0.0804,
0.0507, 0.1404, -0.0241,
0.0520, 0.1239, 0.0633,
-0.0268, 0.0335, 0.0883,
-0.0549, -0.1022, -0.0515,
-0.0163, -0.1167, -0.0442,
0.0858, -0.0804, -0.0014,
0.0354, -0.0666, -0.2105,
-0.0950, 0.1578, -0.0920,
-0.1303, 0.0299, -0.0195,
-0.0281, -0.1993, -0.0154,
0.0796, 0.0503, 0.0954,
0.0540, 0.0212, 0.0389,
-0.1387, 0.1091, -0.1212,
0.1556, 0.3573, 0.0976,
-0.0587, -0.2070, 0.2067,
0.0138, 0.0051, -0.1008,
0.2877, 0.1079, -0.0681,
0.0953, -0.0739, -0.2349,
0.1482, 0.0657, 0.0480,
0.1590, -0.0009, 0.1402,
0.0700, 0.0435, 0.1190,
0.0957, 0.0117, -0.1010,
0.1790, -0.0200, -0.0765,
0.0797, 0.1455, -0.0340,
0.0008, -0.0267, 0.0089,
0.0644, 0.0647, 0.0397,
0.0463, -0.0116, -0.0771,
0.2237, 0.0324, 0.0192,
-0.0082, -0.0345, 0.0294,
0.0719, -0.0185, 0.1008,
-0.0307, 0.0134, -0.0747,
0.0776, -0.1485, 0.0135,
0.0965, -0.0665, -0.1263,
-0.0101, -0.0097, -0.0144,
-0.0022, -0.0083, 0.0277,
0.0136, -0.0076, 0.0314,
-0.0008, 0.0722, -0.0704,
0.0053, 0.0767, 0.0368,
-0.0189, -0.1354, 0.0231,
-0.1416, 0.1945, -0.1756,
0.2058, 0.0401, -0.1348,
-0.0945, -0.2530, -0.3082,
-0.0096, 0.0871, 0.0699,
-0.0092, 0.0423, 0.0995,
-0.0914, -0.0570, -0.0718,
-0.0739, -0.2749, -0.2320,
0.1488, -0.2698, -0.1977,
0.1445, -0.1655, -0.0758,
0.2035, -0.0138, 0.0332,
0.0282, -0.2247, -0.0945,
-0.0614, -0.2484, -0.0595,
-0.1174, -0.1252, 0.1969,
-0.1101, -0.2950, -0.2164,
-0.0348, -0.0891, 0.1250,
0.0195, 0.0050, 0.0300,
-0.0508, -0.0316, -0.0194,
0.0199, 0.0345, 0.0444,
-0.0022, -0.0529, 0.1604,
0.0756, -0.2015, -0.2117,
-0.0837, -0.1270, 0.1330,
0.0286, 0.0952, 0.1082,
0.0724, -0.0446, -0.1156,
0.0545, 0.0444, -0.0291,
0.0759, 0.1110, 0.0944,
0.1615, 0.4302, -0.1060,
0.0418, -0.0281, -0.1378,
-0.0757, -0.0527, -0.1578,
0.0123, -0.0427, 0.1504,
0.0694, 0.0690, 0.0203,
0.2132, -0.3449, 0.0936,
0.2491, 0.0279, -0.0884,
-0.0447, 0.1589, -0.0054,
-0.0246, 0.1247, 0.0403,
0.0513, -0.0541, -0.1141,
0.0712, -0.1174, -0.0051,
0.2304, 0.2431, -0.0517,
-0.1548, -0.0401, 0.2032,
-0.0087, -0.1676, -0.0600,
0.1094, -0.0329, 0.0530,
-0.0580, 0.1499, -0.0806,
-0.0086, -0.1400, -0.0636,
0.0708, -0.1003, -0.1113,
-0.0732, -0.1199, 0.0060,
-0.0534, -0.0011, 0.0965,
-0.0268, 0.0116, -0.1161,
0.0787, 0.3925, -0.0819,
-0.0041, -0.0892, -0.2063,
-0.1296, 0.0924, -0.0079,
0.5625, 0.4013, 0.1645,
-0.0137, -0.1935, 0.2714,
0.0980, 0.0016, -0.1461,
0.1576, 0.0305, -0.1450,
0.1503, -0.0303, -0.1403,
0.0262, -0.0077, 0.0459,
0.2718, 0.0754, 0.2404,
0.1381, -0.1499, 0.0016,
0.1454, -0.1278, -0.0085,
0.1674, -0.0834, 0.1993,
0.0874, -0.0598, -0.0188,
0.2003, 0.3296, 0.0153,
-0.0154, 0.5550, -0.0945,
0.0489, 0.0415, -0.0940,
0.0164, 0.0791, 0.1077,
-0.0893, 0.1231, 0.0473,
-0.0319, 0.1444, 0.1690,
-0.0518, -0.1404, -0.1778,
-0.0170, 0.1395, -0.0234,
0.0128, -0.0112, -0.0472,
0.1039, 0.1982, -0.0272,
0.0282, -0.1199, -0.2622,
-0.0449, 0.0239, -0.1030,
-0.0840, -0.1044, -0.0646,
0.0588, 0.1937, -0.2494,
0.0180, 0.0747, 0.1530,
0.0500, 0.1756, 0.0491,
-0.1113, -0.0079, 0.0854,
-0.1493, -0.0559, -0.0373,
0.1972, -0.3158, -0.0500,
0.1932, 0.3177, -0.0018,
-0.0516, -0.1144, 0.0686,
0.0175, 0.0598, 0.0345,
-0.0667, -0.1078, 0.0384,
0.0897, 0.2198, -0.0531,
-0.2596, -0.1997, 0.0195,
0.0332, 0.4098, 0.1381,
0.1985, -0.0669, -0.1275,
-0.0751, -0.2388, -0.0672,
0.0090, 0.0891, -0.0362,
0.1392, -0.0518, 0.2039,
0.2079, -0.1202, 0.0707,
0.0498, -0.1237, -0.0665,
-0.0398, -0.1557, -0.0928,
0.0505, 0.1220, 0.0352,
-0.0674, -0.1159, 0.0724,
-0.0331, -0.1751, 0.0766,
0.0992, -0.0763, 0.0090,
-0.1223, 0.2621, -0.2029,
0.0509, -0.0279, -0.1061,
0.0598, 0.0353, -0.1610,
0.0165, 0.0835, 0.0704,
-0.0079, -0.0982, 0.0187,
0.2331, -0.1929, 0.0684,
-0.0507, 0.1476, -0.0886,
-0.0275, 0.1658, 0.0697,
-0.1123, -0.0069, -0.0851,
-0.0377, -0.0917, -0.0629,
-0.0420, 0.0506, 0.1111,
0.1086, 0.1351, -0.0851,
0.0466, 0.2750, 0.0185,
-0.0208, 0.2090, 0.0271,
0.0217, -0.0548, 0.0078,
-0.0609, 0.1029, -0.1641,
0.1392, 0.0115, 0.0317,
-0.0570, 0.1060, 0.1814,
-0.2015, -0.1301, 0.1082,
0.2452, -0.1815, -0.0046,
0.0103, -0.0466, -0.0895,
0.0158, -0.0594, -0.1386,
-0.0073, -0.0719, -0.0716,
0.1308, -0.0206, 0.0511,
-0.0437, -0.0763, 0.0287,
0.0493, -0.1239, 0.0219,
-0.0041, 0.0373, 0.0262,
0.0078, -0.0249, -0.0284,
0.0598, -0.0205, -0.0276,
0.0115, -0.1778, -0.0395,
0.1673, -0.0036, 0.2334,
0.0706, -0.0694, 0.0177,
0.1123, -0.0043, 0.0716,
-0.0894, -0.1609, 0.0334,
-0.0046, -0.2006, -0.0977,
-0.0127, 0.1198, -0.0339,
-0.0283, 0.1354, 0.1637,
-0.1696, 0.0187, -0.2621,
0.0496, 0.2834, 0.0423,
0.1126, 0.3962, 0.1660,
-0.0750, 0.1955, 0.0590,
-0.1088, -0.1146, -0.1219,
0.1360, 0.1524, 0.0498,
-0.1151, 0.0219, -0.0063,
-0.0821, 0.0247, -0.1065,
0.1153, 0.2085, 0.0618,
-0.0383, 0.0527, -0.2067
}
,
{
1.8014e-01, 2.1908e-01, -2.1088e-03,
1.7345e-01, 2.7654e-01, 1.3607e-02,
1.1363e-01, 9.9105e-02, -6.5730e-02,
-3.5679e-02, 9.6072e-03, 4.0721e-02,
-1.8771e-02, -2.3484e-04, -1.0230e-02,
1.6965e-02, -1.3032e-02, -6.3906e-02,
-4.5686e-02, -3.6733e-02, -4.8873e-02,
4.0752e-02, 2.1615e-02, -1.4822e-02,
1.1689e-01, 3.0153e-02, -5.0163e-04,
-7.0394e-03, -1.2387e-01, -8.9243e-02,
-1.8312e-01, -1.3868e-01, -6.2618e-02,
-8.1627e-02, -2.0480e-01, -3.0740e-01,
4.4296e-02, 3.8572e-02, 4.3754e-02,
1.7538e-01, 5.3284e-02, -7.5663e-03,
1.9670e-01, -1.2397e-01, -1.6266e-01,
1.4575e-01, -5.7771e-02, 2.7619e-02,
2.2757e-02, -4.8910e-01, -2.6201e-01,
3.6513e-02, -2.0704e-01, -1.3225e-01,
-6.7533e-02, 1.1289e-02, 7.1316e-02,
-7.6847e-02, 6.8128e-02, 7.4717e-02,
1.1269e-01, 2.9978e-02, 3.2132e-02,
-5.4557e-02, -4.4599e-02, 4.1835e-02,
5.7964e-02, -2.1246e-03, 1.5007e-01,
1.8432e-01, 1.1463e-01, 2.2691e-01,
9.6166e-02, 4.7887e-02, -3.8399e-02,
5.8153e-02, -2.0255e-02, -1.1362e-01,
2.6402e-02, 2.5562e-02, 1.9096e-02,
1.1588e-01, 1.4540e-01, 1.1948e-01,
1.0360e-01, 5.9083e-02, 1.9263e-01,
1.6953e-01, 2.7390e-02, 9.7883e-02,
1.5059e-01, 6.7593e-02, -4.5843e-03,
8.7031e-02, -2.0926e-03, -6.3056e-02,
-6.6960e-02, -5.2056e-02, -7.3570e-02,
1.4361e-02, 1.1059e-01, -4.9720e-02,
4.4270e-02, 3.9995e-02, 4.3101e-03,
-1.1042e-01, 4.5028e-02, -8.9124e-02,
-1.2906e-01, -7.6972e-02, -6.5449e-03,
-1.9269e-01, 2.8349e-01, 1.1573e-01,
-1.7983e-01, 9.7615e-02, 9.4003e-03,
-4.7802e-02, -1.5889e-01, -1.2693e-01,
7.4717e-02, 2.8655e-01, -7.2637e-02,
1.5837e-02, 8.7125e-02, -1.2198e-01,
-1.7754e-02, -5.6443e-02, -9.8661e-03,
6.3040e-02, 2.0249e-02, -3.5368e-02,
9.7756e-03, 2.6760e-02, -5.5172e-02,
-1.0406e-02, 4.8313e-02, 2.4717e-02,
-5.2851e-02, 6.8496e-02, -2.5933e-02,
4.5932e-02, 5.9892e-02, 1.9200e-02,
-5.1316e-40, -5.1811e-40, -1.5144e-40,
-6.7758e-38, -5.4608e-40, -3.9680e-40,
-1.9155e-39, 2.0423e-41, 1.5256e-41,
-2.5559e-08, -3.2461e-08, -2.6821e-08,
-3.6885e-08, -4.6896e-08, -3.9086e-08,
-3.4305e-08, -4.4160e-08, -3.7187e-08,
-3.7416e-40, 3.6550e-40, 5.0727e-40,
-1.6722e-40, 3.9228e-40, 5.4548e-40,
-5.7512e-40, -2.8156e-40, 9.4571e-41,
-4.7040e-40, -1.6974e-40, 6.3849e-40,
-3.7322e-40, 2.6014e-40, 2.3080e-40,
-2.8395e-40, -3.7116e-40, 4.4393e-40,
1.1597e-40, 4.3291e-40, 3.8219e-40,
3.3393e-40, 3.1747e-40, -1.8400e-36,
-5.5215e-40, 1.7648e-40, -1.6540e-35,
-3.0953e-40, 5.3063e-40, -1.6454e-40,
2.1341e-40, 2.0790e-40, -3.0226e-40,
-2.6807e-40, -1.6601e-40, 5.1829e-40,
-1.8897e-40, -4.5956e-41, 5.3784e-40,
-2.5661e-40, -2.1726e-40, 1.2010e-40,
1.8263e-41, 1.1214e-40, -3.7693e-40,
-4.2596e-40, 1.8854e-40, 5.5010e-40,
-6.6262e-40, -4.8808e-40, 3.3123e-40,
5.9379e-41, 2.3249e-40, 4.4504e-40,
-8.4836e-04, -8.4397e-04, -5.8640e-04,
-8.3506e-04, -8.0192e-04, -5.3901e-04,
-8.3539e-04, -7.8069e-04, -4.8720e-04,
-3.4706e-04, -4.4640e-04, -5.2353e-04,
-4.4518e-04, -5.3374e-04, -5.2734e-04,
-5.8780e-04, -5.8730e-04, -5.4362e-04,
-5.2452e-04, -5.4578e-04, -5.6266e-04,
-4.2387e-04, -4.4643e-04, -4.8936e-04,
-3.5880e-04, -3.7886e-04, -4.1998e-04,
-2.4479e-04, -4.0736e-04, -3.1189e-04,
-3.4922e-04, -4.0173e-04, -2.5042e-04,
-5.7091e-04, -5.2665e-04, -2.3293e-04,
-2.8505e-04, 9.7283e-05, 3.1209e-04,
-2.7463e-04, 1.8704e-04, 4.4351e-04,
-9.1436e-05, 3.2602e-04, 5.7573e-04,
-4.0112e-04, -4.2566e-04, -2.4300e-04,
-9.9362e-05, -6.5499e-05, 3.2872e-05,
1.1584e-04, 2.3417e-04, 3.4427e-04,
-7.5767e-05, 3.9768e-06, 6.2201e-05,
2.3151e-05, 2.5595e-04, 3.4038e-04,
-1.3871e-05, 3.0295e-04, 4.4170e-04,
-1.7802e-04, -4.5376e-04, -5.1847e-04,
-5.0687e-04, -5.5837e-04, -2.5917e-04,
-5.3992e-04, -7.1375e-04, -4.8728e-04,
-1.7543e-01, -3.4151e-01, -3.2619e-02,
-1.9701e-02, -1.5494e-01, -1.6534e-01,
3.5632e-02, -1.0897e-01, -3.8379e-02,
-6.1420e-02, -1.0735e-01, 1.4730e-01,
7.4386e-02, -1.0487e-01, 7.9646e-02,
1.7130e-02, 4.4391e-02, -5.1959e-03,
4.5682e-02, -1.1543e-01, 9.4035e-03,
-3.4376e-01, -1.1961e-01, 1.0099e-01,
1.1335e-01, 7.5840e-02, 1.0675e-01,
4.9539e-02, 8.7406e-02, 4.4951e-02,
1.8111e-01, 2.6406e-01, -1.5924e-02,
-1.1464e-01, 8.4579e-04, -6.6811e-02,
-8.9635e-03, 1.8236e-03, 3.6561e-02,
-7.0281e-02, 2.9717e-01, 3.1836e-02,
-1.3647e-01, -6.5627e-02, 9.3063e-02,
-2.1851e-01, -6.0226e-02, -1.0326e-01,
5.3441e-02, 1.9103e-01, -5.7999e-02,
-3.3512e-02, 1.5496e-01, -1.1111e-01,
2.3256e-03, -1.5004e-01, -9.1248e-02,
-9.7706e-02, 1.9549e-01, -1.5403e-01,
-1.5327e-01, 8.3335e-02, 5.6111e-03,
-1.5707e-01, 8.0277e-03, -7.3955e-02,
-1.4111e-01, -1.3548e-01, -1.0563e-01,
2.3054e-01, -2.1822e-02, -6.6938e-03,
-1.0259e-01, 4.3577e-02, -1.7630e-01,
1.6484e-01, 4.2413e-01, 6.9475e-02,
-2.4705e-01, 2.5757e-01, -9.5611e-02,
1.0236e-01, -3.4820e-02, -6.8818e-03,
-1.1434e-01, -3.1800e-01, 2.1337e-02,
-1.9939e-01, -2.6532e-01, 7.3361e-02,
6.5939e-02, 9.5812e-02, -7.0156e-02,
-1.6249e-02, -1.5927e-02, -1.1189e-01,
-9.3936e-03, -1.0933e-01, -2.9399e-02,
-2.8752e-02, -4.5613e-02, -1.2718e-02,
3.8781e-01, 2.6776e-01, -1.0373e-02,
-2.3927e-02, -6.4398e-02, 9.9117e-02,
-6.0732e-02, -5.5917e-03, 5.1716e-02,
-1.4168e-01, 1.7661e-01, -5.5893e-02,
-3.0419e-01, -3.5537e-01, 2.1978e-01,
-1.8610e-01, -5.7743e-03, 3.2649e-02,
1.9975e-01, 1.6508e-01, 1.3808e-02,
1.0733e-01, 1.4722e-01, 5.8671e-02,
6.4940e-02, 1.6114e-01, 3.9697e-02,
1.1530e-01, 2.4021e-01, -2.1669e-01,
6.0220e-02, 2.0257e-01, -1.5227e-01,
-6.1096e-02, 6.6511e-02, -1.3858e-01,
-6.5275e-02, 1.0891e-01, 8.2048e-02,
-6.7907e-02, 2.2863e-02, -1.0322e-01,
1.6542e-01, -1.4436e-01, 6.4125e-02,
-1.0378e-01, -3.2346e-01, -1.5123e-02,
3.8758e-03, 1.1006e-01, -4.4325e-02,
-1.0102e-01, -3.7699e-02, 9.2472e-02,
-6.8972e-02, -1.2308e-02, 1.6478e-01,
3.4351e-02, -1.7461e-02, 1.0301e-01,
-2.7125e-01, -5.6730e-02, -2.5989e-01,
-3.0163e-01, -1.4826e-01, -3.4955e-01,
-1.6259e-01, -1.6708e-01, -2.7964e-01,
-6.7134e-02, -2.2385e-01, 2.1776e-01,
-1.1351e-02, -3.7861e-01, 1.8687e-01,
4.0551e-02, 8.1943e-02, 1.0866e-01,
1.0273e-01, 1.1844e-01, -1.1852e-01,
2.6758e-02, -8.5806e-02, 5.9444e-02,
-5.1627e-02, 7.1636e-02, 2.2841e-01,
-3.7242e-03, 2.9723e-01, 1.1918e-01,
8.4994e-02, -3.5747e-01, 3.6148e-02,
9.9705e-02, -1.3736e-01, -6.0080e-02,
1.2370e-01, 5.0668e-02, -6.0246e-02,
6.0562e-02, -3.5068e-01, -3.2645e-01,
9.1020e-04, 6.6203e-02, -1.0770e-01,
1.9434e-02, 3.0018e-01, 2.8018e-01,
1.4021e-01, 2.7481e-01, 2.2868e-01,
4.8540e-02, 1.7719e-01, -4.5834e-02,
-9.6349e-02, -2.3008e-02, -1.4497e-01,
4.3053e-02, -1.0161e-01, 2.8750e-02,
-1.2594e-01, -1.0388e-02, -4.3966e-02,
7.5993e-02, -7.1609e-02, 1.4624e-02,
4.1110e-02, 7.1258e-02, -2.9109e-02,
-5.8698e-03, 1.2389e-01, 4.7648e-02,
-6.1585e-04, -4.4556e-02, -2.3373e-02,
-4.4883e-02, -7.7722e-02, -7.3635e-02,
-2.7750e-02, -1.5117e-03, -8.7368e-02,
2.5113e-02, 7.7490e-02, 2.9024e-02,
1.5426e-01, 2.5472e-01, 4.8057e-02,
-1.1969e-01, -1.1487e-01, -1.1802e-01,
-4.7392e-02, -4.2226e-02, 3.1968e-02,
-2.6717e-01, -5.0206e-02, 8.1946e-04,
-4.0426e-02, 1.4373e-01, -3.3121e-03,
-4.5292e-02, -2.4538e-02, 1.0377e-01,
-1.7780e-02, 2.0058e-01, -2.4343e-02,
-1.1714e-02, 1.5984e-01, -1.2638e-01,
6.4655e-02, 3.7703e-02, 3.7970e-02,
9.1864e-03, 1.1468e-01, -6.2760e-04,
-1.4812e-01, 6.5670e-03, 1.0765e-01,
1.5023e-01, -7.0594e-02, -1.3924e-01,
3.6016e-02, -3.9078e-02, -3.8950e-02,
1.8735e-02, -1.5573e-01, -1.2456e-01
}
,
{
4.8634e-02, -1.3617e-01, 6.1231e-02,
-7.0235e-02, -6.4110e-01, 1.5985e-01,
8.6151e-02, 1.1847e-01, 1.3819e-01,
-3.6017e-04, -3.2273e-02, -8.5485e-02,
-7.0804e-03, 2.1751e-01, 7.2575e-03,
-8.3606e-02, -1.4885e-01, -1.2702e-01,
4.0848e-41, 8.0934e-40, -1.8889e-40,
-3.9103e-40, -7.4709e-40, 3.8377e-40,
-2.4159e-40, -4.7610e-40, 7.7359e-40,
-8.6217e-05, -5.9763e-05, -4.0558e-05,
-7.4966e-05, -4.7074e-05, -3.1656e-05,
-9.8390e-05, -6.6833e-05, -4.7669e-05,
3.5375e-02, 2.8660e-02, 4.1277e-02,
1.6289e-01, -3.2199e-01, -1.7845e-02,
2.4659e-01, -3.9618e-02, 4.1065e-03,
2.7267e-02, 8.6819e-02, 9.5070e-02,
-7.2700e-02, -2.8826e-01, 1.1750e-03,
2.5259e-02, 2.4681e-03, 6.4737e-02,
7.3023e-03, 2.9631e-02, 1.0820e-02,
-2.1400e-02, 5.4244e-01, 1.5639e-01,
-1.7561e-01, 4.8947e-01, -8.8305e-02,
6.5073e-02, 3.4922e-01, 1.3483e-01,
1.4506e-01, -2.5472e-01, -7.2894e-02,
4.5945e-02, 1.4040e-01, 1.2148e-01,
-2.6932e-01, -1.1518e-01, -9.3158e-03,
-2.3961e-01, -1.2479e-01, -8.9796e-02,
1.8688e-02, -4.9267e-02, 7.7189e-02,
-7.3691e-02, 7.8186e-03, 1.3761e-02,
-1.5689e-01, 3.1138e-02, 3.9231e-02,
-4.3607e-03, 2.0813e-01, 5.5635e-02,
-6.7000e-41, 9.8995e-41, 3.0043e-40,
6.7190e-40, 4.0827e-40, 7.6057e-40,
4.2208e-40, 8.1141e-40, -3.3569e-40,
1.0179e-03, 5.1543e-04, 3.8076e-04,
7.3507e-04, 4.5432e-04, 3.7410e-04,
9.3014e-04, 6.7365e-04, 6.0051e-04,
-5.1998e-02, 6.5768e-02, 3.1603e-02,
-3.0198e-02, -3.1692e-02, -6.9299e-02,
1.7672e-02, 2.3766e-01, 5.7877e-02,
-5.7944e-02, 1.2624e-01, -1.4396e-01,
-4.1542e-02, 6.5110e-01, 1.0942e-01,
-1.3133e-01, 5.0538e-02, -2.7371e-02,
-3.7515e-02, 2.8703e-02, 1.2382e-03,
3.8542e-01, -2.2754e-02, 3.4459e-02,
3.0545e-01, -5.3817e-01, -2.1389e-03,
1.3888e-02, -2.2775e-01, -6.3692e-02,
-1.8430e-01, 5.8452e-02, 4.5764e-02,
-8.5045e-02, -1.7060e-01, -1.8565e-02,
-2.0384e-02, -3.3018e-02, -5.1135e-02,
-4.5789e-02, -1.8105e-01, 3.5419e-02,
-5.0081e-02, 8.7719e-02, 1.0373e-01,
-1.0033e-02, 7.0530e-02, -7.8012e-03,
8.4042e-02, 1.1982e-01, -9.6046e-02,
-6.4009e-02, -1.0711e-01, -1.3523e-01,
1.8868e-41, -7.0039e-40, -7.2568e-40,
1.7408e-40, -7.8143e-40, -6.8130e-40,
-6.3142e-40, -6.2560e-40, -7.4238e-40,
2.6297e-04, 7.0014e-05, -4.0981e-04,
2.6263e-04, 4.2811e-05, -4.9950e-04,
3.9795e-04, 1.2615e-04, -4.7660e-04,
7.5933e-02, 2.6295e-02, 2.7984e-02,
-5.5914e-03, -8.7981e-02, -9.2618e-02,
4.2725e-02, -3.1210e-01, 1.3412e-01,
5.2683e-02, 3.9891e-01, 2.9150e-02,
-6.6090e-02, 2.9455e-01, -1.9710e-01,
1.4546e-02, -2.5572e-02, 8.1125e-02,
1.2271e-01, 1.6097e-01, 4.5644e-02,
3.6101e-02, -1.7174e-02, 6.6110e-02,
1.5078e-01, 4.5180e-01, 7.7154e-02,
-5.9725e-02, 1.0185e-01, 1.1363e-03,
6.7791e-02, 1.7696e-02, 5.2638e-02,
3.3051e-02, -8.4049e-02, 1.4380e-01,
1.8744e-02, -2.0940e-01, -2.1424e-01,
-2.1329e-01, -1.3154e-01, -3.2572e-01,
1.1292e-01, 1.2361e-02, -1.5506e-01,
-1.0362e-02, 1.9955e-02, 4.2639e-02,
-2.1952e-02, -2.4682e-02, -2.4453e-02,
-2.5606e-02, -3.3580e-02, -3.6340e-02,
-5.0830e-40, 6.3797e-40, -5.2775e-40,
-7.7988e-40, -7.4579e-40, -5.1901e-40,
-3.8275e-41, -5.7607e-40, -1.3656e-40,
2.7164e-04, 5.9977e-04, 8.6886e-04,
3.0116e-04, 7.0106e-04, 1.0248e-03,
2.9177e-04, 6.4748e-04, 9.4825e-04,
6.6310e-02, 1.5240e-02, -5.3044e-02,
1.2545e-01, 5.0582e-02, 2.7358e-02,
1.9338e-01, 1.1377e-01, 4.6110e-02,
-3.1997e-02, 1.5171e-02, -4.9372e-02,
5.4615e-04, 1.7262e-01, -2.2081e-01,
8.4871e-02, 1.7824e-02, -3.6429e-02,
4.2821e-02, -1.0055e-01, 4.8927e-02,
1.2524e-01, 5.8859e-02, -2.0980e-02,
2.2897e-01, 1.7594e-01, 3.4239e-02,
1.0915e-01, 1.2088e-01, 1.0151e-01,
6.8449e-03, -1.5546e-01, 1.2024e-01,
4.9036e-02, -1.2245e-01, 4.6713e-02,
7.5083e-03, -4.8084e-02, 9.7731e-03,
4.8779e-02, 3.1848e-02, -9.3517e-02,
6.4595e-02, 3.9337e-02, -7.2343e-02,
3.9519e-02, 4.1867e-02, -5.0485e-02,
2.5257e-02, 1.4071e-01, 1.3606e-01,
1.7481e-01, 2.0210e-01, 1.7241e-01,
-7.6295e-40, -7.8460e-40, -4.1806e-41,
-7.9994e-40, -7.3271e-40, -6.2665e-40,
-7.9602e-40, -7.0226e-40, -7.4131e-40,
-4.5544e-04, -5.2379e-04, -7.0755e-04,
-3.3807e-04, -3.8123e-04, -5.3222e-04,
-3.1771e-04, -3.4586e-04, -4.8784e-04,
-3.5257e-02, -1.1866e-02, 1.9717e-02,
-6.0777e-02, -7.3127e-03, -3.2825e-02,
-1.4952e-01, 3.2117e-01, -6.3786e-02,
-1.0255e-02, 1.2961e-01, -8.6823e-02,
1.6994e-01, 4.7491e-01, 2.7135e-01,
2.8538e-03, 1.5572e-01, -3.3736e-02,
8.5996e-02, -1.0176e-02, 2.6629e-02,
7.3362e-02, -7.7525e-03, 5.6261e-02,
1.0819e-01, -2.5863e-01, -5.7146e-03,
-7.1781e-02, 2.8376e-03, 7.8298e-02,
1.3183e-01, 2.7149e-02, -9.9786e-02,
9.0491e-02, 8.7938e-02, -2.1882e-02,
4.1396e-03, -4.5816e-02, -7.8892e-02,
-6.3855e-03, 1.7502e-01, 1.2053e-01,
1.2492e-01, 6.1258e-02, -4.0516e-02,
-4.5409e-02, -4.5877e-02, -7.6414e-02,
-1.0573e-02, -1.2517e-01, -4.3991e-02,
-2.6447e-02, -9.5478e-02, -2.4735e-02,
-4.6548e-41, -1.6443e-40, -3.1221e-40,
-3.2675e-40, -2.7265e-40, -3.1190e-40,
-2.2065e-40, -2.5407e-40, -6.9511e-40,
-1.2727e-04, -2.6585e-04, -3.5516e-04,
3.4272e-05, -1.6810e-04, -3.1677e-04,
-5.5355e-05, -2.9924e-04, -4.3692e-04,
-5.6428e-02, 1.0771e-01, 1.0185e-01,
2.2948e-01, -7.8744e-02, 6.0768e-04,
-2.2355e-03, -2.0128e-03, -5.7317e-03,
-7.1232e-03, 1.0297e-01, 1.6872e-01,
1.9194e-01, -1.1578e-01, 1.0732e-01,
-8.6952e-02, 3.2901e-02, -6.6658e-03,
7.3979e-02, 8.3875e-02, -7.6372e-03,
1.9577e-01, 2.7391e-01, 4.5275e-02,
1.5610e-01, 2.3802e-01, 1.6555e-02,
1.3814e-01, 1.2870e-01, 9.1626e-02,
-4.6890e-02, -8.8734e-02, 7.8866e-02,
1.0027e-01, 2.2139e-01, 1.0050e-01,
-6.5845e-02, -1.0990e-01, -6.9896e-02,
4.1687e-02, 3.0631e-02, -8.8441e-02,
-1.1868e-01, 1.0836e-02, 2.5873e-02,
-1.7114e-02, 7.6295e-02, 1.5439e-02,
-2.4271e-02, 5.8538e-02, 9.8190e-02,
4.9742e-02, 8.7807e-02, 6.5871e-02,
-7.2669e-40, -7.5936e-41, -7.4975e-40,
-1.6984e-42, -1.7334e-40, -8.4954e-41,
-2.1556e-41, -1.5374e-40, -1.5515e-40,
-6.2626e-04, -7.2727e-04, -8.1665e-04,
-5.6584e-04, -6.1190e-04, -6.9584e-04,
-5.6278e-04, -5.8554e-04, -6.3554e-04,
8.1550e-02, -4.1817e-03, 1.2301e-02,
-4.5800e-02, 4.6708e-02, -8.7972e-02,
-2.9880e-01, 2.6456e-01, 3.9363e-03,
-3.0939e-02, -1.9921e-01, -3.8689e-03,
-8.6803e-02, 3.4857e-01, -1.0201e-01,
2.1597e-02, 1.4380e-02, 4.3448e-02,
7.1195e-02, 1.4980e-01, 3.8079e-02,
-1.2678e-01, -8.1274e-02, -4.3445e-02,
5.2482e-02, -1.8763e-01, 1.1557e-01,
-9.4614e-02, 5.4415e-02, -3.1485e-02,
-3.6451e-02, 1.4379e-01, 5.2291e-02,
-9.2069e-02, 9.5675e-02, -5.8433e-02,
7.5768e-03, -7.1280e-02, -1.4576e-01,
-1.4671e-01, -1.2446e-01, -1.5207e-01,
-5.4368e-02, 3.8303e-02, -8.1794e-02,
2.0492e-02, 4.0910e-02, 1.1379e-02,
3.1582e-02, 3.6039e-02, -4.4040e-03,
1.7540e-02, 1.4097e-04, -6.4367e-02,
-7.9553e-40, -5.3941e-40, -7.1912e-40,
-5.8099e-40, -6.8315e-40, -6.6012e-40,
-7.6242e-40, -5.4784e-40, -7.0267e-40,
-2.9197e-04, -2.1994e-04, -1.9501e-04,
-2.6516e-05, -1.2642e-05, -8.4345e-05,
1.6763e-04, 1.1268e-04, -5.4516e-05,
-3.8007e-03, -6.8765e-02, -9.5716e-02,
6.3091e-02, -8.1971e-02, -9.2895e-02,
-6.8353e-03, 7.3639e-02, 1.3505e-01,
9.0083e-02, 2.4352e-01, 3.9708e-02,
-5.4051e-02, -6.8748e-02, -1.8937e-01,
-1.9808e-03, -7.1337e-02, -2.8316e-02,
8.1504e-02, 8.3226e-03, 6.9013e-03,
9.4393e-02, 5.9322e-02, 5.5023e-02,
1.0236e-01, -4.0205e-02, 3.5172e-02,
6.5381e-02, 4.9075e-02, -5.3931e-02,
4.3961e-02, 9.0223e-03, -4.1678e-02,
-6.4262e-02, -5.0304e-02, -9.3597e-02
}
,
{
3.8496e-01, 1.4287e-01, 3.4530e-02,
-5.5398e-01, -6.0381e-02, 1.2078e-02,
7.9983e-02, 2.1478e-01, -5.7915e-02,
-1.4020e-01, -2.6914e-02, 1.5915e-02,
1.2371e-01, 2.5496e-01, -2.9867e-02,
1.3269e-02, -9.9596e-02, -2.3173e-01,
5.1471e-02, -4.5507e-01, -7.7620e-02,
-5.1328e-02, -1.9808e-02, -4.7051e-02,
3.0573e-02, 7.8762e-02, -7.2627e-02,
6.8690e-02, -4.0125e-02, 5.6657e-02,
8.0208e-02, -2.0075e-02, 1.4019e-01,
-5.7959e-02, -7.3152e-02, 2.0202e-02,
-8.8702e-02, -1.9911e-01, -1.5570e-01,
2.8401e-02, 5.8802e-02, 1.3050e-01,
2.1905e-02, -3.4298e-02, 4.0447e-02,
1.0184e-01, -9.0101e-02, -9.2770e-02,
1.1713e-02, -3.2514e-01, 1.9393e-01,
-9.4227e-02, 2.7053e-01, -9.7233e-02,
-1.0478e-01, 6.0652e-02, 8.3399e-02,
1.1104e-01, 2.9008e-01, 4.9208e-02,
-1.5414e-02, 3.1718e-02, -7.9083e-02,
-5.2358e-03, 9.0101e-02, 5.2973e-02,
5.5527e-02, -1.6599e-02, -8.5167e-02,
-5.1018e-02, 7.2243e-03, -9.5684e-02,
-5.0608e-02, -6.7864e-02, -8.9496e-02,
-2.4348e-01, 2.7477e-01, -1.7588e-01,
1.3927e-01, 5.5502e-02, -1.3370e-02,
-4.3509e-02, -2.1511e-01, -5.9070e-02,
1.0293e-01, 4.2678e-01, -8.7527e-02,
-6.8546e-02, -5.6296e-02, -8.7962e-02,
-8.6130e-02, 9.2069e-02, 7.2303e-02,
2.4365e-02, 2.1988e-01, -7.9408e-03,
-3.0063e-02, 1.1554e-01, -5.0311e-02,
1.0605e-02, 5.4598e-02, 1.3826e-02,
-1.4342e-02, 1.5353e-01, -5.3974e-03,
1.5583e-01, -6.0889e-02, -1.5772e-02,
-2.5956e-02, -3.5285e-01, -2.0338e-01,
2.6011e-01, 2.2737e-01, -1.4693e-01,
-7.7964e-02, 1.0053e-01, -5.4278e-02,
-3.0668e-02, 3.4556e-02, -3.4321e-02,
7.8695e-02, -2.2357e-01, 9.5733e-02,
1.7483e-01, -1.5153e-01, -1.8262e-03,
4.7605e-02, -2.2834e-01, 4.6383e-02,
1.5701e-01, 3.2264e-01, 1.0334e-02,
6.3351e-02, 1.1340e-01, 8.3478e-02,
6.4196e-02, 3.3460e-02, 8.8473e-02,
5.4663e-02, -1.7665e-03, -4.1935e-02,
-6.1346e-03, -5.4463e-02, -6.2960e-02,
2.8159e-02, 2.9903e-02, 9.2429e-03,
-3.0041e-02, -9.7783e-02, -4.9500e-02,
9.5350e-02, -7.9143e-02, -1.3244e-01,
-6.5129e-02, 1.4568e-01, 6.6843e-02,
1.5241e-01, -7.8736e-02, 1.0721e-01,
-5.9015e-02, 1.5320e-01, 3.0796e-01,
-5.4266e-03, -6.0804e-02, 3.7326e-02,
7.4844e-02, 4.8340e-02, 1.5251e-01,
3.8158e-02, 1.2087e-01, -8.9003e-02,
-5.8369e-02, -7.3813e-02, 1.2240e-02,
-4.5106e-03, 7.4580e-02, 1.2042e-01,
4.1959e-02, 1.4529e-01, 5.3636e-03,
-4.9708e-03, -1.0775e-02, -5.9374e-02,
1.5358e-02, 1.7277e-02, -1.5412e-01,
8.1647e-02, 3.3503e-02, -8.1934e-02,
-1.5807e-02, -1.0001e-02, -1.0059e-02,
-9.0493e-03, -7.8954e-02, 4.3891e-02,
-9.3815e-03, 3.2241e-02, 4.7962e-02,
-7.2252e-03, 7.9324e-02, 2.0662e-02,
-5.7710e-02, -5.1142e-02, -1.4296e-01,
2.1501e-02, -1.9518e-02, -2.7658e-02,
1.4983e-01, 8.5447e-02, 7.2092e-04,
1.1275e-01, 6.1131e-02, 5.7955e-02,
1.5624e-02, 2.7225e-01, 1.1716e-01,
-1.6322e-04, -1.3368e-04, -1.5575e-04,
-1.0525e-04, -1.0765e-04, -1.5306e-04,
-8.9692e-05, -1.0857e-04, -1.7316e-04,
-1.8015e-03, -1.3733e-03, -3.9154e-04,
-1.8453e-03, -1.4238e-03, -4.4163e-04,
-1.5511e-03, -1.1131e-03, -2.0087e-04,
-2.4082e-03, -2.2576e-03, -1.9231e-03,
-2.4913e-03, -2.4136e-03, -2.1678e-03,
-2.5057e-03, -2.4650e-03, -2.2732e-03,
-2.3901e-05, -1.5870e-05, -5.8255e-06,
-1.5163e-05, -1.2370e-05, -6.0712e-06,
-1.3098e-05, -1.1132e-05, -5.7866e-06,
-5.9760e-03, -5.9998e-03, -6.0295e-03,
-5.9962e-03, -6.0100e-03, -6.0277e-03,
-6.0003e-03, -6.0059e-03, -6.0148e-03,
-3.2764e-05, -2.9574e-05, -2.8001e-05,
-1.0846e-05, -1.1569e-05, -1.4282e-05,
-1.6255e-06, -2.5666e-06, -4.7808e-06,
-5.1999e-03, -5.2334e-03, -5.2847e-03,
-5.2057e-03, -5.2283e-03, -5.2713e-03,
-5.2195e-03, -5.2321e-03, -5.2633e-03,
-3.0782e-06, -9.2118e-06, -1.6177e-05,
-1.6382e-06, -6.9559e-06, -1.4245e-05,
-1.1471e-06, -6.5984e-06, -1.4903e-05,
7.7574e-02, -1.2866e-02, 4.1348e-03,
-6.7298e-02, -1.3691e-01, 6.4079e-02,
3.7962e-02, 8.7737e-02, -4.1046e-02,
-2.8471e-02, 1.7647e-01, 6.4232e-02,
1.2316e-01, 3.6800e-01, -1.5740e-01,
-6.0839e-02, 1.5449e-02, -1.0761e-01,
-6.6869e-02, -1.2867e-01, -4.0195e-02,
-4.9651e-02, -5.5500e-02, -2.5879e-02,
2.0179e-02, 6.8467e-02, 2.6575e-02,
-6.7728e-04, -7.6269e-02, 2.3470e-02,
7.1869e-02, -1.1855e-01, -2.1067e-02,
1.3263e-01, -3.2957e-02, -3.4365e-03,
8.1936e-02, 1.3073e-01, 1.1477e-01,
1.2429e-01, 1.6129e-01, 1.6251e-01,
1.5476e-02, 3.2862e-02, 2.1999e-02,
-2.9189e-02, -3.3615e-02, 5.5616e-04,
-2.4059e-02, -9.6181e-03, -4.1175e-02,
-6.3680e-04, -9.6559e-02, -9.1448e-02,
3.0238e-02, 1.2534e-01, 1.5256e-02,
-4.2118e-02, 1.5723e-01, 2.6929e-03,
1.9873e-02, 5.3050e-02, -1.0153e-03,
2.0634e-02, 9.2825e-03, -6.8027e-03,
3.1335e-03, -7.7443e-03, -1.8307e-02,
7.9974e-03, -1.0283e-03, -6.2520e-03,
4.5050e-02, 9.9504e-02, -1.3404e-01,
-6.7271e-01, -5.7290e-02, 2.6919e-02,
2.3673e-01, 2.4688e-02, -2.0227e-02,
5.1389e-02, -3.9810e-02, -8.9700e-02,
2.8445e-02, 3.9136e-01, -1.1508e-01,
-1.0449e-01, -6.2005e-02, 6.5721e-02,
-1.9123e-01, -4.2613e-02, 3.5371e-02,
1.9207e-01, 8.7916e-02, 4.8089e-02,
-5.7912e-02, 1.0014e-01, -9.4659e-02,
1.1240e-02, -6.2254e-03, 1.3399e-01,
1.6483e-01, -3.5079e-01, 1.1612e-02,
2.9215e-01, 5.6875e-02, 6.9505e-02,
1.3721e-02, 1.2607e-01, 2.6426e-02,
-2.0529e-01, 2.1768e-01, 2.1232e-01,
-6.3574e-02, 2.3504e-02, -1.0811e-01,
-1.3470e-02, -3.6446e-02, -5.4379e-02,
-1.3257e-01, -8.3412e-02, 3.7745e-02,
5.8778e-02, -2.6060e-01, 3.8262e-02,
-4.3689e-03, -6.6703e-02, -2.2025e-01,
-9.0961e-02, 1.3855e-01, 3.4573e-04,
-2.9613e-01, -3.6138e-02, -1.3827e-01,
4.5896e-02, -5.3871e-02, -1.0037e-01,
1.8457e-01, 1.0338e-01, -5.7306e-02,
5.5510e-02, -9.4938e-02, -5.6527e-05,
1.6372e-01, -3.3854e-02, 5.6332e-02,
-4.0251e-01, -5.9428e-02, -9.1470e-02,
-1.5921e-02, -5.7948e-02, 8.1682e-03,
-3.7833e-03, 1.6293e-01, 5.3784e-02,
1.1053e-01, -1.3867e-01, 2.6772e-02,
-1.3133e-02, 3.7614e-01, 3.6361e-03,
-1.4205e-01, 3.1312e-02, -9.9928e-02,
-1.5755e-01, 4.2016e-01, 9.4065e-02,
2.7536e-02, 1.2620e-01, -1.4894e-01,
-4.2137e-02, -9.8700e-02, -1.7479e-01,
4.5836e-02, 5.3893e-02, -1.0138e-01,
8.3609e-02, 2.1849e-02, -1.0648e-01,
7.4801e-02, -1.2671e-01, -1.5007e-02,
2.7440e-01, -3.1351e-01, 6.5787e-02,
-6.7820e-02, 1.6312e-01, -1.3254e-02,
-2.5770e-02, -2.0041e-02, 5.8243e-02,
1.6055e-02, 1.1971e-02, -4.6112e-02,
-1.6276e-01, -1.5313e-02, -7.9826e-03,
9.1668e-02, 9.7722e-02, 1.3754e-01,
-7.4817e-02, -4.1923e-01, -1.2337e-01,
1.3472e-01, -4.0745e-02, -5.4055e-02,
-1.2943e-02, 4.8796e-02, 4.2007e-02,
9.4668e-02, 8.6149e-02, 1.2362e-01,
7.0637e-02, 2.3565e-01, 1.4582e-01,
5.6904e-02, -8.2166e-02, 1.0563e-01,
9.3969e-02, -2.2909e-01, 4.6537e-02,
6.5257e-02, 1.4804e-01, -6.2092e-02,
-1.5699e-02, -1.5303e-02, 1.6671e-01,
-6.1947e-03, 2.5749e-01, 1.5257e-01,
3.2908e-02, -5.9907e-02, 1.1502e-01,
7.5876e-02, -2.6699e-01, -1.5891e-02,
-8.0426e-02, 1.3406e-01, -1.9881e-02,
3.5472e-02, -8.2140e-02, 1.6509e-02,
8.3390e-03, -7.8291e-02, -2.0754e-01,
3.4490e-02, 2.7913e-01, 5.9566e-02,
2.5288e-02, 1.1725e-01, -1.0356e-01,
-5.0955e-02, 9.2093e-02, -5.8477e-02,
4.4325e-02, 3.2973e-02, -1.9477e-01,
3.9582e-02, -8.6877e-02, -1.1753e-01,
3.0401e-02, -2.8757e-02, -2.5563e-02,
5.0741e-02, -3.5056e-01, -2.5584e-01,
9.1709e-02, -4.0932e-02, 2.3812e-01,
5.0945e-02, 4.9246e-02, 1.2738e-01,
5.1440e-03, 1.5703e-01, 5.5743e-02,
-3.9492e-02, 1.2114e-01, 2.0531e-02,
8.0800e-02, 2.6680e-03, -1.6660e-02,
1.0684e-01, 1.2308e-01, 1.7882e-02,
1.8280e-02, 1.0972e-01, -5.2912e-03
}
,
{
-1.3812e-02, -4.6271e-02, 7.3790e-02,
-6.3801e-02, -3.6817e-01, -1.7880e-02,
5.2986e-02, 1.8626e-01, 1.5645e-03,
1.2367e-02, -6.2923e-02, 3.0844e-02,
9.3623e-02, 1.9527e-01, -2.6366e-02,
-2.0837e-02, -3.4424e-02, 4.0256e-02,
4.1482e-02, 6.1795e-02, -1.1293e-02,
-8.9944e-02, -1.3608e-01, 1.8067e-02,
3.6974e-02, 5.2530e-03, -2.7474e-02,
1.1872e-05, 1.9000e-05, 2.0729e-05,
1.0139e-05, 1.6832e-05, 1.9392e-05,
6.5445e-06, 1.0973e-05, 1.3521e-05,
-5.3340e-02, 1.3108e-03, 4.0436e-02,
5.7068e-02, -2.7923e-02, -5.4781e-02,
-2.9293e-02, 2.7145e-02, 2.7340e-02,
5.3520e-03, 1.8766e-02, 4.0297e-01,
2.6473e-02, -3.4675e-02, -1.1783e-01,
-2.5038e-02, -1.7702e-02, -3.4908e-02,
1.4847e-02, 2.3237e-01, -6.3687e-02,
-6.5672e-02, -2.1888e-01, -1.7233e-02,
4.0608e-02, -6.9580e-02, -2.2200e-02,
5.8163e-02, 1.3695e-01, -2.6257e-02,
-1.3328e-01, -3.5730e-01, 2.4507e-02,
-4.5611e-03, 2.0424e-01, -3.9821e-02,
5.5300e-02, -1.6006e-01, 1.1717e-01,
-2.6107e-02, -8.6995e-02, 8.3720e-02,
7.5494e-02, 3.2189e-01, 1.5527e-01,
-6.6869e-02, 1.4469e-01, 5.1805e-02,
9.8760e-02, -1.6759e-01, -1.2350e-01,
5.7005e-02, 8.4904e-02, 8.9713e-02,
-1.4263e-02, 2.8914e-02, 3.2239e-02,
-2.4871e-02, 5.6014e-02, -4.4469e-02,
3.1209e-02, 1.3677e-02, -2.1052e-02,
-1.6548e-03, -1.8796e-03, -1.9883e-03,
-1.6186e-03, -1.8494e-03, -1.9670e-03,
-1.5841e-03, -1.8173e-03, -1.9345e-03,
3.5726e-02, 1.8013e-01, 1.6913e-02,
-1.2168e-01, -6.3848e-02, 3.0555e-02,
3.0269e-02, -1.0260e-01, -1.5259e-02,
-4.7375e-03, 5.5115e-02, 6.2642e-01,
9.9776e-03, -2.1988e-01, -2.0984e-01,
7.0470e-03, 6.3178e-02, -1.3607e-02,
1.1918e-01, -2.4081e-01, 1.7889e-01,
-1.0514e-01, 2.9220e-01, -1.3263e-01,
5.6091e-03, -4.1623e-02, 2.5589e-02,
-1.8496e-01, 2.7698e-02, -6.5768e-02,
2.9677e-01, 4.4163e-02, 5.8530e-02,
-1.1010e-01, -7.6787e-02, 3.9844e-02,
5.2113e-03, -1.8202e-02, 1.4129e-03,
-6.1402e-03, -2.7222e-01, 7.4690e-02,
1.9131e-02, 2.2753e-01, 1.9587e-02,
-2.7391e-02, 6.7917e-03, 2.0496e-03,
6.7333e-02, 7.8262e-02, 2.1110e-03,
-5.4519e-02, 3.0763e-02, 1.5628e-02,
9.5055e-02, 3.8855e-02, 1.2446e-02,
-1.5152e-01, 7.8124e-02, -1.2616e-02,
9.3100e-03, -1.6528e-02, -1.2873e-02,
-1.8377e-03, -1.9231e-03, -1.8930e-03,
-1.8058e-03, -1.8841e-03, -1.8678e-03,
-1.7387e-03, -1.7966e-03, -1.7781e-03,
-4.5122e-02, 1.7027e-03, -3.5534e-03,
8.5222e-03, 1.0130e-01, 4.7893e-02,
6.5574e-02, 7.2150e-03, -2.1820e-03,
-5.5105e-03, -1.8990e-01, 2.6527e-02,
6.6140e-03, 2.1537e-01, -2.2183e-02,
-8.0628e-03, 6.8398e-03, 9.4474e-03,
1.2239e-01, -1.3337e-01, 7.3391e-02,
-1.2205e-01, 1.3145e-01, -2.0063e-02,
2.2168e-02, 3.6097e-03, 2.7146e-02,
4.6717e-02, 2.1122e-02, 1.5491e-02,
-1.3077e-01, 1.1635e-01, 1.0849e-02,
8.0113e-02, -8.4028e-02, 1.2863e-03,
-2.9796e-02, -8.4537e-02, -2.6766e-03,
-7.7771e-03, -2.4274e-03, 8.6274e-02,
-2.0354e-02, 4.1245e-02, 8.4227e-02,
5.5894e-02, 1.0706e-01, 5.2965e-02,
-7.8731e-03, 5.5825e-01, 1.0373e-01,
-1.1975e-01, -2.0071e-02, -2.5286e-02,
-7.7477e-02, 5.3589e-02, -1.5710e-03,
-1.2753e-01, 2.5166e-01, 8.2205e-03,
-9.8349e-02, -4.9539e-02, -5.4941e-02,
-4.9916e-03, -4.9986e-03, -5.0660e-03,
-4.9770e-03, -4.9840e-03, -5.0543e-03,
-4.9997e-03, -5.0114e-03, -5.0809e-03,
6.1819e-02, 1.5061e-01, 1.1984e-02,
1.2905e-01, 2.5921e-01, 1.4768e-01,
4.5548e-02, 1.4902e-01, -4.8961e-03,
-1.3605e-02, 8.2896e-02, -4.1931e-01,
-2.2657e-02, 2.4768e-01, 2.6528e-01,
-1.1566e-02, -8.7819e-03, 4.3618e-02,
-3.4332e-02, -1.8392e-01, 4.4471e-02,
-3.7073e-02, -5.4620e-02, 1.0899e-01,
3.7891e-02, 9.9487e-02, 3.2383e-02,
-6.3628e-02, -5.0303e-03, 5.4617e-02,
-8.7802e-02, 2.1977e-01, -6.0249e-03,
6.3554e-02, -5.4291e-02, -2.6709e-02,
-1.5505e-02, -6.7104e-02, 3.8607e-02,
-1.1427e-01, -3.2524e-01, 4.0077e-02,
-6.5144e-03, 1.2313e-01, -2.7924e-02,
1.4265e-02, -3.8338e-02, 8.6780e-02,
1.5341e-01, 1.2174e-01, -7.3160e-02,
2.6326e-04, 7.3690e-02, 5.2187e-02,
-3.3114e-02, -3.6588e-02, 1.1635e-02,
-3.3521e-02, 1.0767e-01, -8.9125e-03,
-2.2431e-02, -4.5655e-03, 7.5531e-03,
6.7227e-04, 7.2856e-04, 7.3907e-04,
6.5335e-04, 7.0702e-04, 7.1233e-04,
6.1540e-04, 6.7286e-04, 6.7797e-04,
-3.1496e-02, 6.0514e-02, 4.2013e-02,
-2.8617e-02, 1.4846e-02, 4.0016e-03,
4.7006e-03, -4.0017e-02, -3.0411e-02,
-9.6037e-03, 8.8522e-02, 9.8616e-02,
4.1297e-02, -3.2645e-01, -7.6144e-03,
-1.0711e-02, 3.9324e-02, 4.0144e-02,
5.2899e-02, -7.8668e-02, -5.4798e-02,
-2.0428e-01, 5.7238e-02, -3.6937e-02,
-3.6103e-02, -8.2683e-02, -2.8101e-02,
8.2479e-02, 5.7766e-02, -1.2019e-01,
-3.8373e-01, 6.8272e-02, -1.1758e-02,
5.1129e-02, -2.7931e-01, 4.5608e-02,
-2.5151e-02, -5.0816e-02, 1.7231e-02,
-3.6376e-02, 1.5916e-01, 2.9192e-02,
-4.1947e-02, 5.3183e-02, -9.7289e-02,
4.6138e-02, 7.0842e-02, 1.6673e-02,
-1.7243e-03, 2.7203e-01, 3.8262e-02,
-1.4000e-01, -7.3793e-02, -2.0050e-02,
-1.8750e-02, -8.5319e-02, -3.0858e-02,
-5.9981e-02, 1.2729e-01, 1.4094e-02,
-5.4088e-02, -2.3694e-02, -9.7485e-03,
-4.7840e-03, -4.8359e-03, -4.8727e-03,
-4.7882e-03, -4.8380e-03, -4.8755e-03,
-4.7859e-03, -4.8321e-03, -4.8633e-03,
4.9511e-02, 1.0935e-01, -3.7430e-03,
1.1834e-01, 7.7243e-02, 4.3074e-02,
6.7446e-02, 2.9734e-02, -1.1276e-02,
-2.0080e-02, 1.3561e-01, -1.3455e-01,
-1.4505e-02, 2.2100e-01, 4.9635e-02,
-1.0040e-02, 3.4560e-02, -7.4607e-03,
-6.8873e-02, -5.6221e-02, 1.2255e-02,
-2.9198e-02, 7.1612e-02, 2.9402e-02,
4.1036e-02, 4.6417e-02, 6.0284e-03,
-6.5261e-02, 2.1426e-03, 2.4192e-02,
-1.6073e-03, -6.2222e-03, -1.8295e-02,
2.4952e-04, -2.0623e-02, -3.3064e-03,
5.9188e-02, -4.8839e-02, 7.9840e-02,
-6.7952e-02, -4.7191e-01, 1.5117e-01,
1.5668e-01, 2.4733e-01, 1.1354e-01,
1.7742e-02, -4.4059e-02, 9.5374e-03,
3.2049e-01, -1.3779e-01, 9.6608e-02,
8.4580e-02, 1.4293e-01, 6.1574e-02,
2.8777e-03, 7.8795e-02, -5.1902e-02,
1.2212e-01, 1.0321e-01, 3.2360e-02,
-9.6617e-02, 7.8941e-03, -7.0876e-02,
3.5869e-03, 3.5891e-03, 3.5923e-03,
3.5746e-03, 3.5840e-03, 3.5967e-03,
3.5785e-03, 3.5932e-03, 3.6080e-03,
1.5454e-03, 3.0582e-03, 4.3737e-02,
-5.9833e-02, -1.1247e-01, 4.4380e-02,
-1.3206e-01, 8.2778e-03, 4.7963e-02,
-4.3720e-02, -7.5722e-03, 2.0510e-01,
3.0133e-02, -4.0506e-01, 2.7867e-01,
5.5586e-02, 2.8926e-02, 1.3360e-03,
1.9490e-05, 3.3326e-01, -7.7241e-02,
-1.5648e-01, 1.5195e-01, -1.3995e-01,
8.6519e-02, 1.0447e-01, -4.1413e-02,
-3.8667e-03, 1.6159e-01, 1.1627e-01,
-2.2646e-01, -3.4758e-02, -6.7956e-03,
-3.2689e-01, 1.9606e-01, -9.1523e-02,
1.1238e-02, 1.5084e-03, 4.2113e-02,
-1.1154e-02, -3.6596e-01, -7.2252e-02,
6.6621e-02, 1.0188e-01, 4.1032e-01,
3.5892e-02, -4.8304e-02, 6.6142e-03,
1.3374e-01, 2.2720e-01, -7.1224e-02,
6.8952e-02, 2.0467e-01, 5.0251e-02,
-6.2016e-02, 2.2175e-01, -1.7764e-02,
2.7542e-02, 1.4905e-01, 3.6637e-02,
-7.2231e-02, 5.0271e-03, -7.1823e-02,
3.5760e-03, 3.5540e-03, 3.5692e-03,
3.5664e-03, 3.5490e-03, 3.5689e-03,
3.5671e-03, 3.5619e-03, 3.5864e-03,
2.7470e-02, -3.9752e-02, 4.1063e-02,
-2.4985e-02, -1.7969e-01, 8.2186e-02,
-5.4251e-02, -5.9651e-03, 2.5079e-02,
-2.1197e-02, 2.5426e-02, 1.3585e-01,
-1.3460e-02, -1.1377e-01, 1.2278e-01,
3.6533e-02, 1.2843e-02, 5.6219e-02,
5.8141e-04, 2.8354e-01, -6.2016e-02,
-1.0289e-01, 1.8724e-01, -9.9475e-02,
5.1193e-02, 7.5986e-02, -1.2951e-03,
-8.2587e-02, 1.8498e-01, 1.0891e-01,
1.3538e-01, -4.7728e-01, 1.0868e-01,
-8.6415e-02, -1.7061e-01, 1.0457e-02
}
};
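// Per-layer biases for the HDNL3 model: HDNL3biasL[layer][featureMap] for the
// eight 8-channel convolution layers (assumed to be added inside the
// CHANNEL1TO8/CHANNEL8TO8 macros defined earlier in this file).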
static __device__ __constant__ const float HDNL3biasL[8][8] =
{
{
-0.1175, -0.0258, -0.0053, -0.0437, -0.0563, -0.1047, -0.3449, 0.0568
}
,
{
0.0339, -0.1738, 0.0061, 0.1565, -0.0316, -0.0016, -0.0032, -0.0554
}
,
{
-0.0508, -0.0609, 0.0347, -0.0802, -0.0438, 0.2512, -0.0491, -0.0259
}
,
{
0.0655, 0.0255, 0.0228, -0.0027, -0.0155, -0.0163, -0.0174, -0.1095
}
,
{
4.9947e-03, 5.3372e-03, -4.5286e-09, -1.3756e-03, 3.8858e-03, -4.4197e-02, 3.3970e-02, 2.8411e-02
}
,
{
-0.0396, 0.0007, 0.1735, 0.0109, 0.1177, 0.0919, 0.0567, -0.0005
}
,
{
0.0127, -0.0688, 0.1102, -0.0052, 0.1602, -0.0191, -0.0322, 0.0311
}
,
{
0.0063, 0.0093, 0.0729, 0.3734, 0.0006, 0.1915, 0.3186, 0.2636
}
};
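// Weights of the final transposed-convolution layer for HDNL3: 8 input
// feature maps x 4 sub-pixel output positions, indexed as
// HDNL3kernelsL10[4 * channel + position] in convTranspose8To1HDNL3 below.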
static __device__ __constant__ const float HDNL3kernelsL10[4 * 8] =
{
-0.0967, -0.3094,
0.3537, 0.5705,
0.2547, 0.3360,
-0.0718, -0.0700,
-0.3013, -0.1602,
0.4520, 0.0495,
0.1564, 0.3773,
-0.0216, 0.4367,
-0.4855, -0.1972,
-0.2026, -0.4390,
0.3743, -0.1156,
0.4408, -0.3123,
-0.3577, 0.0753,
-0.3396, 0.0336,
0.1052, -0.4180,
0.0799, -0.3587
};
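// First convolution layer (1 input channel -> 8 feature maps), one kernel per
// HDNL model variant. Each thread reads a 3x3 neighborhood of the
// single-channel source texture, applies the CHANNEL1TO8 weighted sum with
// ReLU, and stores the 8 feature maps as two float4 layers of a layered
// surface. The four variants below are identical apart from the weight-set
// index passed to CHANNEL1TO8.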
__global__ static void conv1To8HDNL0(
hipTextureObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
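    // Gather the 3x3 pixel neighborhood; out-of-range coordinates are
    // handled by the addressing mode configured on the source texture.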
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 0)),
RELU(CHANNEL1TO8(1, 0)),
RELU(CHANNEL1TO8(2, 0)),
RELU(CHANNEL1TO8(3, 0))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 0)),
RELU(CHANNEL1TO8(5, 0)),
RELU(CHANNEL1TO8(6, 0)),
RELU(CHANNEL1TO8(7, 0))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv1To8HDNL1(
hipTextureObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 1)),
RELU(CHANNEL1TO8(1, 1)),
RELU(CHANNEL1TO8(2, 1)),
RELU(CHANNEL1TO8(3, 1))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 1)),
RELU(CHANNEL1TO8(5, 1)),
RELU(CHANNEL1TO8(6, 1)),
RELU(CHANNEL1TO8(7, 1))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv1To8HDNL2(
hipTextureObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 2)),
RELU(CHANNEL1TO8(1, 2)),
RELU(CHANNEL1TO8(2, 2)),
RELU(CHANNEL1TO8(3, 2))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 2)),
RELU(CHANNEL1TO8(5, 2)),
RELU(CHANNEL1TO8(6, 2)),
RELU(CHANNEL1TO8(7, 2))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv1To8HDNL3(
hipTextureObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 3)),
RELU(CHANNEL1TO8(1, 3)),
RELU(CHANNEL1TO8(2, 3)),
RELU(CHANNEL1TO8(3, 3))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 3)),
RELU(CHANNEL1TO8(5, 3)),
RELU(CHANNEL1TO8(6, 3)),
RELU(CHANNEL1TO8(7, 3))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
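// Hidden convolution layers (8 -> 8 feature maps). The 8 input channels
// arrive as two float4 layers of the source surface, so each thread performs
// 18 float4 reads (a 3x3 neighborhood of both layers), applies the
// CHANNEL8TO8 weighted sum with ReLU (the layer index L is consumed inside
// the macro), and writes two float4 layers back. Variants 0-3 differ only in
// the HDNL weight set used.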
__global__ static void conv8To8HDNL0(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
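    // 3x3 neighborhood of both surface layers: *1 holds feature maps 0-3,
    // *2 holds maps 4-7. Reads outside the image return zero
    // (hipBoundaryModeZero), giving implicit zero padding.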
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, hipBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 0)),
RELU(CHANNEL8TO8(1, 0)),
RELU(CHANNEL8TO8(2, 0)),
RELU(CHANNEL8TO8(3, 0))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 0)),
RELU(CHANNEL8TO8(5, 0)),
RELU(CHANNEL8TO8(6, 0)),
RELU(CHANNEL8TO8(7, 0))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv8To8HDNL1(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, hipBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 1)),
RELU(CHANNEL8TO8(1, 1)),
RELU(CHANNEL8TO8(2, 1)),
RELU(CHANNEL8TO8(3, 1))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 1)),
RELU(CHANNEL8TO8(5, 1)),
RELU(CHANNEL8TO8(6, 1)),
RELU(CHANNEL8TO8(7, 1))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv8To8HDNL2(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, hipBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 2)),
RELU(CHANNEL8TO8(1, 2)),
RELU(CHANNEL8TO8(2, 2)),
RELU(CHANNEL8TO8(3, 2))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 2)),
RELU(CHANNEL8TO8(5, 2)),
RELU(CHANNEL8TO8(6, 2)),
RELU(CHANNEL8TO8(7, 2))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
__global__ static void conv8To8HDNL3(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, hipBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, hipBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, hipBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, hipBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, hipBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 3)),
RELU(CHANNEL8TO8(1, 3)),
RELU(CHANNEL8TO8(2, 3)),
RELU(CHANNEL8TO8(3, 3))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 3)),
RELU(CHANNEL8TO8(5, 3)),
RELU(CHANNEL8TO8(6, 3)),
RELU(CHANNEL8TO8(7, 3))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, hipBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, hipBoundaryModeZero);
}
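// Final layer: transposed convolution from 8 feature maps back to a
// 1-channel image at twice the resolution. Each output pixel reads the
// feature maps of source pixel (x/2, y/2) and selects one of four sub-pixel
// weight columns by its (x, y) parity, i.e. a 2x depth-to-space upscale.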
__global__ static void convTranspose8To1HDNL0(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
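    // Parity of (x, y) selects which of the four sub-pixel kernels applies.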
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
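    // Dot product of the 8 feature maps with the selected kernel column,
    // clamped to [0, 1] and quantized to 8 bits (+0.5f rounds to nearest).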
uchar c = clamp(
mc1.x * HDNL0kernelsL10[0 + index] +
mc1.y * HDNL0kernelsL10[4 + index] +
mc1.z * HDNL0kernelsL10[8 + index] +
mc1.w * HDNL0kernelsL10[12 + index] +
mc2.x * HDNL0kernelsL10[16 + index] +
mc2.y * HDNL0kernelsL10[20 + index] +
mc2.z * HDNL0kernelsL10[24 + index] +
mc2.w * HDNL0kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
__global__ static void convTranspose8To1HDNL1(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL1kernelsL10[0 + index] +
mc1.y * HDNL1kernelsL10[4 + index] +
mc1.z * HDNL1kernelsL10[8 + index] +
mc1.w * HDNL1kernelsL10[12 + index] +
mc2.x * HDNL1kernelsL10[16 + index] +
mc2.y * HDNL1kernelsL10[20 + index] +
mc2.z * HDNL1kernelsL10[24 + index] +
mc2.w * HDNL1kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
__global__ static void convTranspose8To1HDNL2(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL2kernelsL10[0 + index] +
mc1.y * HDNL2kernelsL10[4 + index] +
mc1.z * HDNL2kernelsL10[8 + index] +
mc1.w * HDNL2kernelsL10[12 + index] +
mc2.x * HDNL2kernelsL10[16 + index] +
mc2.y * HDNL2kernelsL10[20 + index] +
mc2.z * HDNL2kernelsL10[24 + index] +
mc2.w * HDNL2kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
__global__ static void convTranspose8To1HDNL3(
hipSurfaceObject_t srcImg, hipSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, hipBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, hipBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL3kernelsL10[0 + index] +
mc1.y * HDNL3kernelsL10[4 + index] +
mc1.z * HDNL3kernelsL10[8 + index] +
mc1.w * HDNL3kernelsL10[12 + index] +
mc2.x * HDNL3kernelsL10[16 + index] +
mc2.y * HDNL3kernelsL10[20 + index] +
mc2.z * HDNL3kernelsL10[24 + index] +
mc2.w * HDNL3kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, hipBoundaryModeZero);
}
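// Host-side driver: uploads the single-channel 8-bit source image, runs the
// ten-layer ACNet pipeline (1->8 conv, eight 8->8 convs ping-ponging between
// two layered surfaces, then the 8->1 transposed conv), and copies back the
// 2x-upscaled result.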
void cuRunKernelACNet(const unsigned char* inputData, unsigned char* outputData, ACCudaParamACNet * param)
{
hipError_t err = hipSuccess;
hipChannelFormatDesc inoutChannelDesc = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
hipChannelFormatDesc tmpChannelDesc = hipCreateChannelDesc(32, 32, 32, 32, hipChannelFormatKindFloat);
hipExtent extent = make_hipExtent(param->orgW, param->orgH, 2);
const size_t W = 2 * param->orgW, H = 2 * param->orgH;
hipArray_t cuInputArray;
err = hipMallocArray(&cuInputArray, &inoutChannelDesc,
param->orgW, param->orgH);
CheckCudaErr(err);
hipArray_t cuArray1;
err = hipMalloc3DArray(&cuArray1, &tmpChannelDesc, extent,
hipArraySurfaceLoadStore | hipArrayLayered);
CheckCudaErr(err);
hipArray_t cuArray2;
err = hipMalloc3DArray(&cuArray2, &tmpChannelDesc, extent,
hipArraySurfaceLoadStore | hipArrayLayered);
CheckCudaErr(err);
hipArray_t cuOutputArray;
err = hipMallocArray(&cuOutputArray, &inoutChannelDesc,
W, H, hipArraySurfaceLoadStore);
CheckCudaErr(err);
struct hipResourceDesc resDesc;
struct hipTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeBorder;
texDesc.addressMode[1] = hipAddressModeBorder;
texDesc.readMode = hipReadModeNormalizedFloat;
texDesc.normalizedCoords = 0;
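// Fetches through inTex return the 8-bit samples as floats in [0, 1]
// (hipReadModeNormalizedFloat); out-of-range coordinates read as zero
// (hipAddressModeBorder).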
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = cuInputArray;
hipTextureObject_t inTex = 0;
err = hipCreateTextureObject(&inTex, &resDesc, &texDesc, NULL);
CheckCudaErr(err);
resDesc.res.array.array = cuArray1;
hipSurfaceObject_t surf1 = 0;
err = hipCreateSurfaceObject(&surf1, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuArray2;
hipSurfaceObject_t surf2 = 0;
err = hipCreateSurfaceObject(&surf2, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuOutputArray;
hipSurfaceObject_t outSurf = 0;
err = hipCreateSurfaceObject(&outSurf, &resDesc);
CheckCudaErr(err);
err = hipMemcpy2DToArray(cuInputArray, 0, 0, inputData,
param->orgW, param->orgW, param->orgH,
hipMemcpyHostToDevice);
CheckCudaErr(err);
dim3 dimBlock(16, 16);
dim3 dimGrid(
(param->orgW + dimBlock.x - 1) / dimBlock.x,
(param->orgH + dimBlock.y - 1) / dimBlock.y
);
dim3 dimGridout(
(param->orgW * 2 + dimBlock.x - 1) / dimBlock.x,
(param->orgH * 2 + dimBlock.y - 1) / dimBlock.y
);
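// One thread per pixel: dimGrid covers the source resolution for the
// convolution layers, dimGridout the doubled resolution of the output.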
switch (param->HDNLevel)
{
case 0:
RUNKERNEL(0)
break;
case 1:
RUNKERNEL(1)
break;
case 2:
RUNKERNEL(2)
break;
case 3:
RUNKERNEL(3)
break;
default:
RUNKERNEL(0)
break;
}
err = hipMemcpy2DFromArray(outputData, param->orgW * 2,
cuOutputArray, 0, 0, W, H,
hipMemcpyDeviceToHost);
CheckCudaErr(err);
hipDestroyTextureObject(inTex);
hipDestroySurfaceObject(surf1);
hipDestroySurfaceObject(surf2);
hipDestroySurfaceObject(outSurf);
hipFreeArray(cuInputArray);
hipFreeArray(cuArray1);
hipFreeArray(cuArray2);
hipFreeArray(cuOutputArray);
}
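// Minimal usage sketch (hypothetical caller; the ACCudaParamACNet fields are
// assumed from their use above):
//   ACCudaParamACNet param{};
//   param.orgW = srcWidth;
//   param.orgH = srcHeight;
//   param.HDNLevel = 0;                  // 0-3 selects the weight set
//   cuRunKernelACNet(src, dst, &param);  // dst must hold (2*orgW)*(2*orgH) bytes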
| 3335e8ea3b53a3b302237457ace031290fc45f69.cu | #include "CudaHelper.cuh"
#include "CudaInterface.hpp"
typedef unsigned char uchar;
#define RELU(x) fmaxf(x, 0.0f)
#define L2 0
#define L3 1
#define L4 2
#define L5 3
#define L6 4
#define L7 5
#define L8 6
#define L9 7
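// CHANNEL1TO8(n, Level) evaluates output channel n of the first 3x3
// convolution from the nine scalar samples tl..br; CHANNEL8TO8(n, Level)
// does the same for the 8->8 layers, where the eight input channels arrive
// as two float4 surface layers (suffixes 1 and 2).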
#define CHANNEL1TO8(n, Level) \
tl * HDNL##Level##kernelsL1[n * 9 + 0] + tc * HDNL##Level##kernelsL1[n * 9 + 1] + tr * HDNL##Level##kernelsL1[n * 9 + 2] + \
ml * HDNL##Level##kernelsL1[n * 9 + 3] + mc * HDNL##Level##kernelsL1[n * 9 + 4] + mr * HDNL##Level##kernelsL1[n * 9 + 5] + \
bl * HDNL##Level##kernelsL1[n * 9 + 6] + bc * HDNL##Level##kernelsL1[n * 9 + 7] + br * HDNL##Level##kernelsL1[n * 9 + 8] + HDNL##Level##biasL1[n]
#define CHANNEL8TO8(n, Level) \
tl1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 0] + tc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 1] + tr1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 2] + \
ml1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 3] + mc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 4] + mr1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 5] + \
bl1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 6] + bc1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 7] + br1.x * HDNL##Level##kernelsL[L][n * 72 + 0 * 9 + 8] + \
tl1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 0] + tc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 1] + tr1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 2] + \
ml1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 3] + mc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 4] + mr1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 5] + \
bl1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 6] + bc1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 7] + br1.y * HDNL##Level##kernelsL[L][n * 72 + 1 * 9 + 8] + \
tl1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 0] + tc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 1] + tr1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 2] + \
ml1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 3] + mc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 4] + mr1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 5] + \
bl1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 6] + bc1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 7] + br1.z * HDNL##Level##kernelsL[L][n * 72 + 2 * 9 + 8] + \
tl1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 0] + tc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 1] + tr1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 2] + \
ml1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 3] + mc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 4] + mr1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 5] + \
bl1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 6] + bc1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 7] + br1.w * HDNL##Level##kernelsL[L][n * 72 + 3 * 9 + 8] + \
tl2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 0] + tc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 1] + tr2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 2] + \
ml2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 3] + mc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 4] + mr2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 5] + \
bl2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 6] + bc2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 7] + br2.x * HDNL##Level##kernelsL[L][n * 72 + 4 * 9 + 8] + \
tl2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 0] + tc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 1] + tr2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 2] + \
ml2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 3] + mc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 4] + mr2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 5] + \
bl2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 6] + bc2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 7] + br2.y * HDNL##Level##kernelsL[L][n * 72 + 5 * 9 + 8] + \
tl2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 0] + tc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 1] + tr2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 2] + \
ml2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 3] + mc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 4] + mr2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 5] + \
bl2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 6] + bc2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 7] + br2.z * HDNL##Level##kernelsL[L][n * 72 + 6 * 9 + 8] + \
tl2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 0] + tc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 1] + tr2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 2] + \
ml2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 3] + mc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 4] + mr2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 5] + \
bl2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 6] + bc2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 7] + br2.w * HDNL##Level##kernelsL[L][n * 72 + 7 * 9 + 8] + HDNL##Level##biasL[L][n]
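// RUNKERNEL(Level) chains the whole HDNL-Level network; surf1 and surf2 act
// as ping-pong buffers holding the eight feature maps as two 4-channel layers.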
#define RUNKERNEL(Level) \
conv1To8HDNL##Level << <dimGrid, dimBlock >> > (inTex, surf1, param->orgW, param->orgH); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf1, surf2, param->orgW, param->orgH, L2); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf2, surf1, param->orgW, param->orgH, L3); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf1, surf2, param->orgW, param->orgH, L4); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf2, surf1, param->orgW, param->orgH, L5); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf1, surf2, param->orgW, param->orgH, L6); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf2, surf1, param->orgW, param->orgH, L7); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf1, surf2, param->orgW, param->orgH, L8); \
conv8To8HDNL##Level << <dimGrid, dimBlock >> > (surf2, surf1, param->orgW, param->orgH, L9); \
convTranspose8To1HDNL##Level << <dimGridout, dimBlock >> > (surf1, outSurf, W, H);
inline __device__ float clamp(float f, float a, float b)
{
return fmaxf(a, fminf(f, b));
}
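// Layer-1 weights: 8 output channels x 9 taps (3x3), row-major per channel,
// as consumed by CHANNEL1TO8.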
static __device__ __constant__ const float HDNL0kernelsL1[9 * 8] =
{
0.0609, 0.1027, -0.0447,
-0.1423, 0.7196, 0.1803,
0.0842, 0.0696, 0.0082,
0.0089, 0.1540, -0.8589,
0.0448, 0.8659, -0.2420,
-0.0364, 0.0585, 0.0125,
-0.1937, 0.7259, 0.0119,
-0.8266, 0.4147, 0.0088,
-0.0453, -0.0451, -0.0182,
0.0264, -0.9422, 0.1258,
-0.0543, 0.1282, 0.7102,
-0.0106, 0.0386, -0.0141,
0.2054, -0.0393, 0.1494,
0.3106, 0.5722, 0.2640,
0.1708, -0.1640, -0.0212,
0.0558, -0.2887, -0.1666,
0.3123, -0.3097, -0.2281,
0.2880, 0.3001, 0.0526,
-0.0320, 0.0584, -0.0193,
-0.0135, 1.0649, -0.1246,
0.0283, -0.3030, -0.6378,
-0.0040, -0.9122, 0.0181,
0.0365, 0.8947, -0.0420,
-0.0199, 0.0217, 0.0060
};
static __device__ __constant__ const float HDNL0biasL1[8] =
{
-0.7577, -0.0210, 0.0292, -0.0189, 0.0223, 0.0340, 0.0150, -0.0044
};
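// Weights for the eight 8->8 layers (L2..L9), indexed as
// [layer][out_channel * 72 + in_channel * 9 + tap] by CHANNEL8TO8.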
static __device__ __constant__ const float HDNL0kernelsL[8][9 * 8 * 8] =
{
{
2.0611e-01, 6.6865e-02, -9.9123e-02,
8.5279e-02, -4.5549e-02, -2.9491e-02,
-1.0358e-01, -2.4844e-02, -8.1539e-03,
-1.1308e-01, -6.4228e-02, -8.8081e-02,
2.7810e-02, -1.6054e-01, -1.1985e-01,
-2.8679e-01, -1.7785e-02, 1.1559e-01,
2.1614e-02, -6.8870e-02, -2.4707e-01,
9.6867e-02, -1.6561e-01, 2.8281e-02,
-8.2469e-02, -9.8554e-02, -1.7147e-02,
3.3710e-01, 9.2126e-02, 3.6880e-02,
5.7004e-02, 4.0175e-02, 1.6116e-01,
2.5629e-01, 5.1154e-01, 2.4119e-02,
1.9495e-02, 2.6940e-01, -1.4050e-01,
5.0325e-02, -4.5920e-02, -1.3586e-01,
5.9458e-02, 1.3860e-01, -2.1065e-01,
-1.0744e-01, -1.5915e-01, -1.1528e-02,
-1.1470e-01, 6.3455e-02, -5.5558e-02,
-6.9920e-02, -3.0142e-02, -4.9059e-02,
3.6421e-01, 3.0252e-01, -1.3562e-01,
1.5238e-01, -1.9868e-01, -3.2644e-02,
-4.2849e-02, 1.3677e-02, 7.3854e-02,
7.6609e-02, -1.0121e-01, 3.6319e-02,
9.3536e-02, 6.0386e-02, 1.0086e-01,
-2.6630e-01, 2.5875e-02, -1.9225e-01,
4.0687e-02, 1.1005e-01, 9.9578e-03,
1.6939e-01, 5.0872e-01, 8.9876e-02,
6.9561e-02, 1.1910e-01, -1.8091e-02,
-3.5739e-02, -7.5300e-02, -1.6788e-02,
3.0316e-02, 1.5942e-01, -9.0878e-02,
-6.3737e-02, 2.6141e-02, 8.8040e-03,
3.4954e-03, -6.6707e-02, 1.4551e-01,
7.6258e-02, 1.4893e-01, -1.5255e-01,
6.2442e-02, 2.2166e-01, 7.5327e-02,
5.4785e-02, -1.4503e-02, -1.5188e-03,
1.6748e-01, -5.2731e-03, -1.9900e-02,
4.4786e-02, -1.0669e-01, 1.3192e-01,
1.9961e-02, -8.1015e-02, -3.2264e-02,
1.0544e-01, 1.8844e-01, 7.4274e-03,
6.6729e-02, -7.8318e-02, 3.0775e-02,
-8.6109e-03, 7.4977e-02, 9.4079e-02,
-1.2726e-01, -2.9664e-01, 7.8153e-03,
-4.8413e-02, -1.8450e-01, -7.1065e-02,
-8.7609e-02, -7.7192e-02, 5.0919e-02,
-1.4021e-01, 3.5696e-01, 1.2079e-02,
-2.0318e-02, -1.8827e-02, 3.9084e-02,
-2.8654e-02, -6.4166e-02, 5.4889e-02,
8.2689e-02, 8.4463e-02, 2.2339e-02,
1.0805e-01, -1.2566e-01, 1.7109e-01,
-6.1338e-02, -3.4043e-02, 4.0473e-02,
6.3821e-02, 1.7626e-01, -5.8112e-02,
-9.5002e-02, 1.3327e-02, 1.2242e-01,
4.9008e-02, -4.3678e-02, 2.2362e-02,
-7.7903e-02, -3.8252e-02, -5.2271e-02,
-1.8884e-02, -1.2859e-01, 4.1172e-02,
-3.1181e-02, 3.2348e-02, -4.9081e-02,
-6.7966e-02, -2.4896e-02, -6.5323e-02,
8.0742e-02, 2.6093e-01, -2.4638e-01,
-8.0881e-02, -2.9643e-02, -7.9627e-02,
1.4020e-01, 2.1575e-01, 8.1244e-03,
2.1561e-01, -2.9305e-01, -2.5535e-02,
-8.5538e-02, -1.4456e-01, -7.5664e-02,
-3.9921e-02, 4.0659e-02, 1.7812e-01,
1.1580e-01, 5.6628e-02, 9.0008e-02,
-2.2384e-02, -1.9788e-02, -4.0547e-02,
1.0070e-01, 2.9581e-01, 1.9936e-01,
-1.1957e-01, -8.6508e-02, -8.2543e-04,
-5.2879e-02, 1.5486e-01, 1.0829e-02,
1.4716e-01, 3.4257e-01, -3.2058e-03,
-2.1687e-02, 5.8641e-02, -6.3806e-02,
-3.2607e-02, 7.3328e-02, -6.4738e-03,
-1.0031e-01, -1.7698e-01, -9.4201e-02,
-3.3644e-02, -3.5860e-01, -9.3200e-02,
-7.4142e-02, 5.5001e-02, 4.3741e-02,
-2.2447e-03, 1.1941e-01, -1.6135e-02,
-1.4764e-02, -1.0194e-02, 3.2540e-02,
-1.0588e-01, -2.3000e-01, -1.1557e-02,
-9.0254e-02, 2.3352e-01, -1.3622e-01,
-1.9256e-03, -5.3372e-02, 1.0314e-01,
-2.0100e-02, 1.0700e-01, 1.6108e-01,
2.8422e-02, 2.7909e-01, 3.8342e-01,
1.4025e-02, 9.0965e-02, 2.0218e-01,
3.3562e-03, 7.6652e-02, 4.5974e-02,
-1.3617e-02, -1.4014e-01, -1.9253e-02,
1.1020e-01, -1.9678e-01, 6.7123e-02,
-3.3294e-02, -1.3006e-01, -1.0111e-01,
5.5813e-02, 2.1127e-01, 2.0248e-02,
-9.6386e-04, -5.2497e-03, 1.1134e-01,
2.8910e-02, 1.2229e-01, 1.8439e-01,
1.6413e-02, 1.5870e-01, -1.1616e-01,
-1.6032e-03, -6.8258e-03, -2.1883e-02,
1.2052e-01, -2.1982e-02, -1.3088e-01,
2.8664e-02, -5.0670e-02, 2.2927e-01,
2.0461e-02, 7.7250e-03, -2.6630e-02,
-9.0406e-02, -1.4174e-01, 9.8969e-02,
-6.6573e-02, -2.4425e-01, -3.5126e-02,
9.3859e-02, 1.9058e-01, -1.6569e-01,
-4.9163e-03, 7.4149e-02, 6.3345e-02,
-1.7888e-02, -9.1876e-02, 1.3728e-01,
-9.6098e-02, -3.4814e-02, -1.0862e-02,
4.8031e-03, 2.5206e-01, 8.0316e-02,
1.5102e-01, 4.1236e-02, 2.2339e-01,
2.8500e-01, 1.5106e-01, 9.6321e-04,
-6.0741e-02, 3.5759e-02, -1.8829e-01,
-1.1295e-03, -6.2322e-02, 8.4974e-01,
-3.9817e-02, -2.0666e-01, 2.2961e-01,
3.6857e-02, -2.0211e-02, -9.3342e-02,
2.0827e-02, 6.8874e-02, -6.0287e-02,
-6.9724e-02, 1.4423e-01, -7.6017e-02,
1.4718e-02, 1.8990e-01, 1.1789e-01,
-1.5018e-01, -2.3071e-01, 1.7511e-01,
-7.7605e-02, 5.0621e-02, -1.0381e-01,
8.6845e-02, -1.2410e-01, -4.4669e-01,
2.7930e-02, -5.4713e-02, -7.7923e-02,
8.6000e-02, -2.6371e-02, -8.6541e-02,
-1.1521e-01, 1.4389e-01, 5.0507e-02,
-1.6618e-02, -2.5150e-01, -4.9759e-02,
7.7166e-02, 4.5033e-03, -5.4649e-02,
2.8548e-03, -2.8078e-03, 8.1129e-02,
-4.5973e-02, 3.6740e-03, 2.0746e-01,
-9.8191e-02, 1.2807e-01, 8.1950e-03,
1.4240e-01, 1.5104e-01, 6.9624e-02,
2.2309e-01, 2.5688e-01, 9.4766e-02,
6.2560e-02, 7.1347e-02, 4.1432e-02,
-3.1829e-02, 1.5207e-01, 2.0575e-02,
-1.2506e-01, 2.9274e-01, 9.4712e-02,
-2.0520e-01, 4.9894e-04, 5.6171e-02,
-4.1567e-03, 6.6753e-02, -1.5767e-01,
6.3768e-02, 8.3008e-02, -3.5639e-01,
4.4660e-02, 2.6996e-01, -6.4014e-02,
8.5475e-02, 1.7854e-02, -6.4079e-02,
1.8760e-01, 1.5285e-01, -3.5614e-02,
1.0747e-02, -3.1330e-01, -4.8664e-02,
7.2150e-02, 1.7570e-01, 1.6716e-01,
6.2431e-02, 2.3755e-01, 2.8554e-01,
3.5791e-02, 2.8185e-01, 1.5810e-01,
-4.0886e-02, 1.8833e-02, -8.2903e-03,
1.3994e-02, -1.0846e-01, 3.5315e-02,
-6.2674e-02, 6.2806e-02, 2.2168e-02,
-3.6236e-01, -2.5326e-01, 5.6331e-02,
9.8762e-02, 3.8049e-01, 5.9885e-02,
-3.0541e-02, 7.9855e-02, -5.8639e-02,
1.1104e-03, 1.7147e-02, 3.3115e-02,
-3.3663e-02, 7.4615e-02, 6.4211e-02,
-7.3441e-02, -1.5568e-01, 7.6546e-02,
6.1802e-02, -1.5300e-01, -1.8209e-02,
-9.2786e-03, 1.6622e-01, 1.1354e-01,
9.5865e-03, -2.4226e-02, -1.4750e-03,
-5.5294e-02, -1.1839e-01, 3.8867e-03,
1.7262e-01, 4.2743e-01, 6.8970e-02,
-2.0232e-01, -1.4564e-01, 2.3025e-02,
-2.6139e-03, -1.6907e-02, 1.1693e-01,
-9.4871e-03, 3.8488e-02, -4.8351e-02,
-9.2171e-02, 4.8227e-02, 9.7378e-02,
-1.0292e-01, -1.2084e-01, -9.6676e-02,
1.8103e-02, 3.0658e-01, -7.7755e-02,
-2.4362e-02, -1.9862e-01, -6.9665e-02,
8.2944e-03, -1.4680e-01, -1.7371e-02,
-1.6534e-01, 2.5752e-01, 1.1129e-01,
-9.4151e-02, -1.3225e-01, 1.5933e-01,
9.0723e-02, 5.5469e-02, -1.4091e-01,
8.3404e-02, 1.3741e-01, -3.5438e-02,
3.2681e-02, 2.8491e-02, 1.4278e-02,
2.3789e-01, -2.3687e-03, -5.3264e-03,
-1.1161e-01, 1.9351e-02, 5.0832e-02,
8.2246e-03, 2.9892e-02, -3.7197e-02,
4.8236e-02, 1.6945e-01, 1.3673e-01,
1.1236e-01, 7.2318e-01, -4.1618e-02,
2.7494e-01, 1.0081e-01, -8.5399e-03,
-5.6151e-02, 8.1212e-02, -7.5770e-02,
2.7872e-02, 9.4644e-02, 1.1175e-02,
-6.1539e-02, 7.7395e-02, -3.2495e-02,
-5.1640e-02, 2.1028e-03, 1.5825e-02,
-1.1004e-01, 2.3153e-01, -6.1653e-02,
-2.6497e-02, 5.9461e-01, 4.0865e-02,
-1.9956e-02, 7.9328e-02, -1.7002e-02,
-5.5930e-03, 5.2015e-02, 7.7945e-04,
1.0136e-02, -9.0111e-02, -1.1175e-01,
-3.1781e-02, 1.4686e-01, -7.5718e-03,
1.1036e-02, 2.4618e-01, 8.5951e-02,
3.4775e-02, -1.2184e-01, 1.8010e-01,
-3.6781e-02, -1.3912e-01, -4.9172e-02,
3.3064e-02, 5.0582e-01, 1.0713e-02,
-1.2934e-02, -1.7697e-01, -1.4954e-01,
2.2229e-02, -5.8568e-03, -5.0186e-02,
1.9648e-02, -1.1302e-01, 1.5629e-02,
-3.5015e-02, 9.5032e-02, -2.9677e-02,
9.5173e-02, -3.0330e-02, -3.7652e-02,
-2.6097e-03, 7.4723e-01, -7.6234e-03,
-3.8826e-02, 1.0191e-01, 3.6589e-03,
-2.6503e-02, -1.1133e-01, -2.2029e-02,
-1.9101e-01, -2.1108e-01, -7.4371e-02,
-7.9349e-02, -1.0405e-01, 5.0315e-02
}
,
{
-4.2606e-02, -8.9001e-02, -6.4006e-02,
1.1132e-01, 7.6609e-02, 8.6417e-02,
7.6477e-03, -1.6416e-02, -8.2094e-02,
1.0779e-01, 2.1837e-01, 1.8094e-01,
-2.6306e-02, -1.2452e-01, 1.2662e-02,
3.1633e-02, 1.8717e-02, 3.1043e-02,
4.0927e-02, 5.0311e-02, 1.1648e-01,
2.2429e-01, 2.0757e-01, 4.3662e-03,
3.6341e-02, -4.7637e-02, 8.3645e-02,
-8.9260e-03, 1.8507e-02, 7.9069e-02,
-1.9411e-01, -8.6847e-02, -3.6639e-03,
4.0328e-02, -3.6821e-02, -8.5387e-02,
5.8173e-02, 5.9991e-02, -3.1398e-02,
1.5818e-01, 3.0861e-01, -2.3818e-02,
1.2176e-01, 6.7520e-02, 8.9401e-02,
-2.8859e-02, -1.2237e-01, -1.0625e-01,
3.1675e-02, 1.4172e-01, -1.4373e-01,
1.4653e-02, 1.0205e-01, 6.2557e-02,
-8.7292e-02, -2.1255e-02, 3.6830e-02,
-5.4417e-02, 3.0501e-01, 1.6897e-01,
-2.2187e-02, -8.9609e-02, -2.2830e-02,
4.9846e-02, 3.3395e-01, -3.1561e-02,
-1.3191e-02, 4.2663e-01, -6.9727e-02,
1.4570e-02, -4.0002e-02, 5.6394e-02,
-8.2547e-02, 1.9249e-01, 1.5591e-01,
1.4536e-01, -1.0409e-01, 1.2382e-01,
1.8189e-01, 9.2917e-02, -1.4394e-01,
-5.6260e-02, -2.7043e-01, 1.5392e-02,
-1.4305e-02, 1.1131e-01, -8.5913e-02,
7.7914e-02, -6.5484e-03, -1.8375e-01,
-1.4059e-01, -5.7339e-01, -3.9073e-02,
-1.1701e-01, -3.1806e-02, 7.7726e-02,
2.1688e-02, 9.9297e-02, 3.8224e-02,
7.9884e-02, 5.2461e-02, 1.0318e-01,
4.0054e-02, 1.4695e-01, 1.2577e-01,
-1.8790e-03, -4.9421e-02, 2.3235e-02,
-8.9820e-02, -1.6994e-01, -1.5986e-01,
2.3436e-01, -1.5346e-01, 1.5014e-02,
-3.9139e-02, -7.9388e-02, -4.9057e-02,
-1.1193e-01, -2.5705e-01, 1.1995e-01,
5.7929e-02, 2.4988e-01, -4.9406e-03,
-3.9363e-02, -1.1691e-02, -1.2236e-03,
-2.0521e-01, 2.1901e-01, 1.5957e-01,
2.1062e-01, -1.4157e-01, -3.4340e-01,
3.8520e-02, -2.0820e-01, 2.4570e-03,
1.7211e-01, 2.0214e-01, 1.3821e-01,
-7.1520e-02, 1.4847e-01, -1.3820e-01,
-2.4712e-02, -1.5925e-02, 1.7403e-02,
-3.7515e-02, 3.0461e-02, -2.7543e-02,
8.6148e-02, -6.1486e-02, 1.2610e-02,
2.9748e-03, 1.1778e-01, 2.9032e-02,
-2.1706e-02, -2.2406e-02, 2.6769e-02,
-3.6965e-02, 2.2180e-01, -4.0929e-02,
-3.2629e-03, 8.3419e-02, -1.4587e-01,
-1.3909e-02, -2.0166e-02, -1.0029e-01,
7.6360e-02, 8.0819e-02, -1.0933e-01,
-5.8919e-02, 2.4745e-02, 3.7375e-02,
-1.1333e-02, 1.4747e-02, -7.8958e-02,
-3.1535e-02, 1.7403e-01, 1.3946e-02,
-3.2038e-02, 5.1151e-02, -6.1063e-02,
-8.6472e-03, -6.9689e-02, 5.6846e-03,
5.7914e-02, -1.9818e-01, -7.5321e-02,
8.7453e-02, 7.8354e-02, 2.1997e-02,
-4.7606e-02, 1.3915e-01, 1.1653e-01,
9.6050e-02, 4.0099e-01, 1.5631e-01,
3.1492e-02, 2.4797e-01, 6.8716e-02,
-6.2664e-03, 9.1754e-02, -5.7244e-03,
1.3538e-01, 1.5366e-01, 9.4916e-02,
-4.2115e-02, -3.6585e-01, -1.4559e-01,
9.1550e-02, -5.4007e-02, 6.7482e-02,
-1.8687e-01, 3.2120e-01, 5.1031e-03,
-6.1205e-02, -5.1780e-02, 1.6442e-02,
-1.2316e-02, -1.3907e-01, -1.4446e-01,
-2.7899e-01, -8.5969e-02, -1.0870e-01,
-2.6157e-01, 8.9532e-02, 3.0958e-02,
-1.5393e-01, -4.2781e-02, -2.0951e-01,
2.0328e-01, 4.5317e-01, -3.0467e-02,
-6.1346e-02, 1.0381e-01, -1.3719e-01,
-9.8572e-02, -1.4035e-01, -1.9431e-02,
2.5542e-02, 3.2609e-01, 1.7983e-03,
-1.0800e-01, -2.9022e-02, 6.2691e-03,
2.8937e-02, -1.3483e-01, -4.1655e-02,
2.0172e-01, 1.4283e-02, 9.6200e-02,
1.9027e-02, 3.1240e-01, -2.9553e-02,
6.2776e-02, 1.3845e-01, 4.5834e-02,
-2.3854e-01, -4.0267e-02, 1.5634e-02,
-1.9246e-01, -3.2332e-02, 3.2442e-03,
-6.1880e-02, -8.8192e-02, -6.0172e-02,
2.5002e-01, 1.5148e-01, 6.4459e-02,
-2.1022e-01, -8.3893e-02, 6.9554e-03,
7.0244e-02, -2.9551e-02, 1.6481e-02,
-3.1036e-02, -2.0026e-01, -8.4748e-02,
-1.3108e-01, -1.3784e-01, 9.4900e-02,
-2.1256e-01, -4.1767e-02, 8.4665e-02,
-4.0235e-01, 1.0604e-01, -3.1827e-02,
-4.9825e-02, -9.1267e-04, 1.5527e-02,
-6.5729e-03, -1.8932e-02, -3.4591e-02,
1.1066e-01, 9.3979e-02, 2.6059e-02,
-1.2395e-01, -2.4768e-01, -1.6304e-01,
8.8329e-03, -2.1606e-02, -4.0878e-02,
-1.5581e-02, -1.4829e-02, -1.5959e-02,
-1.0463e-04, -4.2903e-03, -4.6657e-02,
2.2995e-02, 1.7917e-02, -9.1404e-02,
-1.2326e-01, 1.4582e-01, -7.0959e-02,
-1.8058e-02, -8.5228e-02, 4.2799e-02,
-2.2829e-03, 8.6577e-02, -1.1909e-01,
-1.8061e-01, 1.1166e-01, -8.2255e-02,
-1.3190e-01, 7.7123e-02, 2.3224e-02,
1.8661e-02, 2.4461e-02, 3.6060e-02,
-4.5224e-02, -1.7672e-01, 1.6080e-01,
-4.2175e-01, -2.2557e-01, -1.0719e-01,
-2.9506e-02, 9.5020e-02, -6.6465e-02,
-7.2627e-02, 3.1236e-01, 5.5764e-02,
-2.8789e-01, -1.8915e-01, 9.0825e-02,
-5.8618e-02, 6.4082e-02, 4.8461e-03,
-5.9405e-02, 3.2644e-01, -7.1278e-02,
-1.8084e-01, 2.0858e-02, -9.3690e-03,
-7.6565e-03, -9.6854e-02, 7.6121e-03,
1.4791e-01, 4.5612e-01, 1.9889e-02,
-5.5498e-02, -1.1266e-01, 2.2790e-02,
-3.8821e-02, -1.5780e-02, 1.2549e-02,
-3.8232e-02, -2.8870e-01, 2.6216e-02,
1.0375e-01, -2.9621e-02, 1.8479e-03,
5.0207e-02, 1.5189e-01, 1.2533e-01,
1.8298e-01, -1.2870e-01, 3.0681e-01,
-1.9571e-02, -8.6302e-02, 9.1121e-02,
1.0113e-01, -1.8362e-01, 3.2642e-02,
1.7034e-01, -3.1077e-01, -4.8737e-02,
5.9144e-02, 5.6052e-03, 3.2360e-02,
-9.0123e-02, 7.7996e-02, 3.6297e-02,
-3.4389e-01, 1.1841e-01, -2.0900e-02,
9.4930e-02, -9.1504e-02, -4.5308e-02,
3.7723e-03, -3.7580e-02, -6.6410e-02,
5.2501e-02, -1.2530e-01, 3.5944e-02,
3.8378e-02, 9.5188e-02, 2.1952e-03,
-2.4333e-02, 2.7977e-01, 5.6961e-02,
-3.0605e-03, 8.3684e-02, 4.4848e-03,
-7.8935e-02, -1.9544e-01, -5.3311e-02,
-2.6595e-02, 1.2278e-01, -3.1659e-02,
-1.0103e-02, 4.7763e-01, 2.5359e-02,
8.1397e-02, 3.0548e-01, 9.7097e-02,
3.6232e-02, -1.1091e-01, 1.2841e-01,
1.9277e-01, 2.9322e-01, -1.6740e-01,
1.2107e-01, -6.2883e-02, 4.0603e-02,
-1.5750e-01, -8.6183e-02, -1.4194e-01,
1.1932e-01, -3.9175e-01, -5.4495e-02,
-1.4001e-02, -2.0594e-01, -8.2683e-02,
8.6156e-02, 2.1499e-02, 2.2080e-01,
5.5703e-02, -3.6307e-01, 8.3129e-02,
8.9280e-02, -3.5897e-02, 1.6106e-01,
9.1171e-02, -3.1102e-01, 1.2425e-01,
1.0278e-01, -3.1014e-01, -6.9138e-02,
8.0839e-02, -3.6183e-02, 1.0341e-01,
-1.8334e-01, -5.3700e-02, 2.3336e-01,
-1.4464e-01, -5.0320e-01, -2.9836e-02,
-1.7225e-01, -3.9499e-01, -1.7321e-01,
1.7510e-01, 1.7897e-01, -2.6518e-01,
2.3638e-01, 5.0270e-01, -4.9731e-03,
2.2603e-01, 2.5317e-01, 2.4079e-01,
-1.3159e-01, 1.5638e-01, 1.2480e-01,
-6.2164e-02, 7.9458e-02, -9.4804e-02,
8.5690e-03, 7.4971e-03, 8.6630e-02,
-1.3148e-02, 6.8660e-02, -7.4230e-03,
2.9702e-02, 1.2036e-01, 9.5504e-02,
-3.2694e-03, 8.6722e-02, -6.2433e-02,
3.2527e-01, 3.2087e-01, -9.4429e-05,
1.3556e-01, -7.0413e-02, 2.9383e-02,
2.0617e-02, 3.3218e-02, 4.4898e-02,
-4.8260e-01, -2.1329e-01, 1.5890e-02,
-2.6600e-01, -8.8519e-02, -4.3800e-02,
-1.7299e-01, -2.0757e-01, -2.6658e-01,
6.9707e-02, -4.4700e-02, 6.5570e-02,
2.3992e-01, 1.5078e-01, 2.8713e-02,
-9.1197e-02, 1.9765e-02, -1.8751e-02,
-9.9277e-02, -3.1437e-01, 4.0730e-02,
2.4208e-02, -8.8322e-02, -1.6245e-01,
1.3037e-02, -3.4708e-02, -4.4285e-02,
-1.3592e-01, -1.3575e-01, -7.4546e-02,
1.4670e-01, -1.3366e-01, 2.1553e-03,
8.1235e-03, -1.2068e-01, -5.7287e-02,
1.8015e-01, 2.1390e-01, 8.6923e-03,
2.8833e-01, 6.6345e-02, 1.4578e-01,
2.2338e-01, 2.6453e-01, -2.9112e-02,
1.4018e-01, -9.2824e-02, -2.2795e-02,
1.2360e-01, 2.2527e-01, -1.1817e-01,
-3.8872e-02, -1.9982e-02, -7.7514e-02,
1.7744e-03, 3.1736e-02, 4.5882e-02,
-2.5222e-02, 2.4298e-01, -3.8596e-02,
1.2545e-02, 3.1872e-02, 7.1925e-02,
7.9782e-02, -1.5533e-01, -1.4619e-02,
-1.2223e-01, -1.8631e-03, -9.8832e-02,
-1.6815e-02, -8.1440e-02, 6.8038e-02
}
,
{
2.3898e-02, 1.2411e-02, -3.2770e-02,
-2.6029e-01, 3.2690e-01, -1.8246e-01,
1.1224e-02, 8.0193e-02, -5.0412e-02,
-9.3849e-02, 2.0325e-02, 2.6309e-02,
1.2266e-02, 1.7698e-01, 2.7049e-01,
1.2918e-01, 2.0190e-01, 2.7352e-01,
-7.2100e-02, 1.3357e-01, -1.3702e-01,
2.2527e-01, 1.5821e-01, -2.3104e-01,
1.0182e-02, -1.5499e-01, 7.1906e-02,
1.5865e-01, 7.0950e-02, -6.3336e-02,
2.2661e-01, -4.2997e-01, -4.2013e-01,
1.7549e-02, -1.3142e-01, -3.1663e-01,
1.3617e-01, 1.4229e-01, -1.0707e-02,
-1.0986e-02, 2.8816e-01, -3.6239e-01,
2.2579e-02, -1.4332e-02, 7.1339e-03,
-1.4357e-01, -9.7608e-02, 1.4646e-01,
-5.3856e-02, 3.3898e-01, -2.4936e-01,
-2.9500e-02, 2.1799e-02, 1.1901e-02,
3.6996e-02, 2.1291e-02, 3.2150e-02,
9.8375e-02, 2.4476e-01, 2.2896e-01,
1.8392e-01, -7.4510e-02, -1.0152e-01,
4.4757e-02, -4.8053e-03, -6.7254e-02,
-4.8370e-02, -7.8975e-02, -3.6007e-01,
-3.8160e-02, 8.7707e-02, -1.4986e-01,
-8.7544e-03, -4.3522e-02, 7.3822e-02,
-1.4523e-01, 1.1433e-01, 4.4109e-02,
-1.6025e-03, 2.5459e-02, -9.3562e-02,
-2.9192e-02, -1.0975e-01, -5.0943e-02,
-1.1215e-01, 1.9907e-01, 7.9934e-02,
3.7066e-02, 3.0796e-01, -1.4034e-01,
-8.2315e-02, -2.0182e-02, -1.2824e-02,
-4.8007e-03, 1.2655e-01, -2.5157e-02,
2.7796e-02, -4.3032e-02, 2.5397e-02,
6.9377e-02, 2.3642e-01, 1.2713e-01,
2.7878e-02, -1.5325e-01, -1.4871e-01,
1.5800e-02, -4.5935e-02, 1.7370e-01,
4.8058e-02, -1.8725e-01, -6.7048e-03,
-1.3932e-01, -6.0768e-02, -1.6976e-01,
-2.1189e-02, 1.0311e-02, -2.2970e-02,
-7.0546e-03, 7.9481e-02, 1.2146e-02,
4.2666e-02, 3.5383e-01, 1.4381e-01,
5.4384e-02, -9.3862e-02, 4.8870e-03,
2.1141e-02, -6.6826e-02, -1.8526e-01,
1.3309e-01, 3.3452e-01, 1.1058e-02,
-1.6967e-02, 1.1094e-01, 5.3230e-02,
3.0409e-02, -4.7613e-02, -1.7737e-01,
-1.6678e-02, -7.8644e-02, 1.1743e-01,
7.3322e-02, -1.1354e-01, -1.5737e-02,
-1.2397e-03, -1.4685e-02, -1.0192e-02,
1.6045e-01, 3.6331e-02, 1.2219e-01,
1.3123e-01, 5.7578e-02, 1.0291e-01,
1.7424e-01, 1.0688e-01, 1.4263e-01,
8.9942e-02, -2.7141e-02, 3.1238e-02,
-4.0240e-02, -1.0930e-01, -2.1276e-01,
1.0357e-01, 5.7673e-02, 1.0356e-02,
-2.0864e-01, -1.9405e-01, 2.5094e-01,
-4.8277e-03, -1.3758e-01, 1.1562e-01,
-1.0358e-01, 2.0631e-01, -9.1445e-03,
-1.7602e-01, 1.0200e-01, 3.0032e-02,
-1.1495e-02, -4.5077e-02, -6.4748e-02,
-2.3072e-02, -3.2342e-02, 1.4503e-02,
-3.7052e-02, -1.2206e-01, 5.5395e-02,
2.8331e-02, -4.2812e-03, 6.9807e-02,
4.3593e-02, -6.7373e-03, 1.2760e-02,
3.2896e-03, -2.4007e-01, -5.2920e-02,
2.5193e-02, -2.1480e-01, 8.4654e-02,
2.2642e-02, 8.2132e-02, -2.3864e-02,
-2.9726e-01, 8.0405e-02, -1.3190e-02,
-1.1310e-01, -4.4342e-01, -6.3536e-02,
-6.7090e-02, 1.1797e-01, 1.5315e-01,
7.7829e-02, -1.4494e-01, 1.0233e-01,
9.7059e-02, 1.2772e-01, -2.4394e-02,
-2.6179e-02, 2.6721e-02, 1.1707e-02,
-4.8024e-02, -2.3366e-01, -1.6978e-01,
-2.4402e-01, -2.8572e-01, -2.4053e-02,
-2.7451e-03, 7.1959e-02, 4.4706e-02,
-1.9900e-01, 2.1353e-01, 1.0625e-01,
4.0246e-01, 4.2323e-01, 3.4046e-02,
-1.6943e-01, -2.0221e-01, -1.6369e-01,
1.3882e-01, 2.1717e-01, -1.3581e-01,
1.3975e-01, 1.1980e-01, 1.8888e-02,
-1.8110e-01, -2.6143e-01, -1.0109e-01,
5.5844e-02, -1.2175e-01, 3.4447e-02,
8.9688e-02, 2.4641e-01, 2.3287e-01,
-5.8259e-02, -1.3656e-01, -1.3936e-02,
-8.3429e-03, 2.3026e-01, 1.2302e-01,
-2.2969e-02, 6.0932e-02, 3.4749e-02,
1.2910e-01, 2.4008e-01, 1.8908e-01,
-5.8776e-02, 3.8121e-01, 8.1312e-02,
9.1175e-02, -1.8729e-02, -4.6156e-02,
3.7493e-02, -3.5877e-02, -9.9651e-03,
1.5864e-01, 1.3611e-01, 6.7880e-02,
2.2216e-01, 9.3697e-02, 7.4782e-02,
-1.0861e-01, -2.5824e-01, 6.6455e-02,
9.2238e-02, -2.3448e-01, -3.4057e-01,
-2.9658e-01, 9.4698e-03, 1.9315e-01,
-5.2396e-02, 1.2310e-01, -5.2917e-02,
-4.3708e-03, 1.9560e-01, -2.4309e-02,
-6.7388e-02, -8.8839e-02, -2.0907e-02,
4.6550e-02, 3.4119e-02, 6.0977e-02,
-1.0054e-02, 1.4411e-01, 1.5622e-01,
1.7401e-02, 2.5685e-01, -9.1853e-03,
-4.4530e-02, -1.8623e-01, -8.4557e-02,
9.5962e-02, 2.6491e-01, 1.7854e-01,
-2.0547e-02, -1.2023e-01, -7.6897e-02,
-1.3418e-01, -1.4960e-01, 1.6292e-01,
-1.7275e-01, -6.0181e-02, -2.7034e-02,
-7.4189e-02, -3.5566e-02, 1.3995e-01,
3.0758e-02, 3.3476e-02, 6.9837e-03,
-6.1089e-02, -9.6021e-02, 7.1716e-03,
1.0389e-01, 4.7963e-02, 9.5921e-02,
4.4569e-02, 1.2230e-01, -1.4417e-01,
-1.2825e-02, 3.1980e-01, -3.5905e-01,
-1.2557e-01, -7.5283e-02, -1.2343e-01,
1.9791e-01, 7.9003e-02, 3.1163e-02,
1.0969e-01, 1.6839e-01, -2.5816e-01,
-1.2617e-01, 1.3686e-01, -2.1078e-01,
-2.1870e-02, -1.8378e-01, -2.8893e-01,
-8.2523e-02, -3.0475e-02, 9.6007e-02,
1.0669e-01, -1.4581e-03, 3.2441e-01,
-8.1872e-03, 1.1690e-02, -4.0179e-02,
-1.0835e-01, 3.6112e-01, -4.5990e-02,
-1.2355e-01, -1.3372e-01, 3.8136e-02,
-9.1530e-03, 3.5432e-02, 4.3950e-02,
-8.6859e-02, 1.5887e-01, 1.2796e-02,
1.3554e-02, -1.5669e-01, -1.4371e-02,
-4.6609e-02, 1.7114e-01, -7.8284e-02,
1.7611e-01, 4.1204e-01, 9.3281e-02,
1.1420e-01, 1.2951e-01, -7.6025e-02,
-5.4831e-02, 9.7574e-02, 3.2839e-02,
3.8475e-02, -6.0247e-02, -2.9627e-02,
-2.4367e-02, 1.3143e-02, 4.7017e-02,
2.3800e-02, -2.4046e-02, -5.7044e-02,
2.7280e-02, 7.8573e-01, 1.0079e-02,
6.4100e-02, 5.1584e-02, 7.9653e-03,
-8.9480e-02, -1.6207e-01, -8.9418e-02,
-3.5589e-02, 3.5903e-01, -1.8381e-01,
9.2356e-02, 8.8046e-02, -5.0229e-02,
1.8609e-02, 1.1243e-01, 5.2599e-02,
-1.3374e-02, -3.3097e-01, 6.5346e-02,
2.6760e-01, -1.0281e-01, 1.1607e-02,
7.6576e-03, -3.5957e-02, 3.1924e-02,
-7.0088e-02, 9.1241e-02, 1.2827e-02,
3.7165e-02, 7.0273e-03, -7.3945e-04,
-6.5406e-03, 7.2666e-02, -5.7348e-02,
-1.9100e-01, -7.4449e-02, -1.2496e-01,
1.5299e-01, -8.8047e-02, -2.1810e-02,
-3.0241e-02, -7.4310e-03, -8.7682e-02,
-2.2479e-02, 9.6008e-02, -8.4539e-02,
-2.8915e-02, 1.7538e-01, -3.7735e-02,
-9.8463e-03, -6.9618e-02, -2.6095e-01,
9.9950e-02, 5.0534e-01, -1.8812e-01,
-1.1986e-01, 7.1166e-02, -2.4769e-02,
8.8529e-02, 9.8348e-02, 2.1136e-02,
-9.0337e-03, 1.3679e-01, -1.2115e-01,
-6.2478e-03, 1.1436e-01, -3.4610e-02,
-2.7350e-02, 1.0702e-01, 1.6220e-02,
1.0912e-02, 1.0953e-01, 8.6762e-02,
2.9348e-03, -2.2035e-02, 1.2376e-01,
7.0102e-02, -1.0945e-01, -1.6640e-01,
-3.9916e-03, -2.6658e-02, -9.7031e-02,
-3.0047e-02, 1.6631e-03, -5.5031e-02,
-7.9624e-02, 1.9976e-01, 1.9582e-01,
2.1377e-01, 3.5835e-01, 1.7012e-01,
-9.7751e-02, 4.9143e-01, 1.0988e-01,
8.4055e-02, -7.3187e-03, -9.8808e-02,
5.0590e-02, -8.9291e-02, -6.6857e-02,
9.6737e-02, -3.0699e-01, 2.2889e-01,
2.6727e-40, -5.2704e-40, -4.5038e-40,
-3.3108e-40, 5.2330e-40, -1.2724e-40,
-3.2957e-40, -5.8613e-40, 2.1618e-40,
-4.3882e-40, -3.3950e-40, 5.9372e-40,
2.7277e-40, -1.3741e-40, -3.3597e-40,
5.0687e-40, 4.7873e-40, -3.2116e-40,
-6.1388e-40, -6.0790e-40, -5.2667e-40,
-5.6524e-40, -6.1696e-40, -5.9796e-40,
1.5824e-40, -5.2002e-40, -5.8960e-40,
-5.9860e-40, 3.6419e-40, 2.9975e-40,
-5.8988e-40, 3.3994e-40, -5.0611e-40,
3.6410e-40, 2.9550e-40, 4.7468e-40,
2.7503e-40, -3.4103e-40, 6.0339e-40,
-1.7691e-40, 6.7170e-41, 1.7101e-40,
2.7166e-40, 4.3023e-40, 2.7735e-40,
-3.1937e-40, -4.9247e-40, -6.2495e-40,
5.2938e-40, -3.3702e-40, 1.4976e-41,
1.4031e-40, -4.6995e-40, -5.2409e-40,
2.5460e-40, 2.6670e-40, -4.5339e-40,
4.2896e-40, -5.7141e-40, -1.7003e-40,
2.3597e-40, 1.3748e-40, 4.6163e-40,
4.0680e-41, -6.1642e-40, 2.7304e-41,
5.2250e-40, -3.9481e-40, -6.1808e-40,
1.9462e-40, 2.6005e-40, -2.7281e-40
}
,
{
1.3625e-02, -8.5594e-02, -1.9901e-01,
-6.4636e-02, -1.9030e-02, 4.1963e-02,
-7.5507e-02, -2.4474e-01, -4.2621e-02,
2.8195e-02, 7.3102e-02, -9.3331e-02,
7.7093e-02, 1.7800e-01, -7.6451e-02,
2.8565e-02, -1.3540e-01, -1.9169e-01,
-1.8583e-02, 3.0135e-02, 8.1094e-03,
-1.2835e-01, -1.8041e-01, -8.9020e-02,
-8.2731e-02, 3.7861e-02, -9.4014e-02,
4.6595e-02, 2.2052e-02, -1.5867e-01,
-1.0937e-02, 1.0030e-01, -1.3018e-01,
-9.1844e-02, -1.7508e-01, 2.2087e-01,
-9.3080e-02, 9.8069e-02, -7.0154e-02,
-6.6063e-02, -2.2142e-01, 4.1058e-01,
-6.5947e-02, -5.4662e-02, 9.9412e-02,
-5.1938e-02, 3.0932e-03, 1.8126e-01,
3.6701e-02, -3.0349e-01, 9.9839e-02,
2.5810e-02, 2.3644e-01, -2.4461e-01,
2.1054e-01, 1.5630e-01, -1.9587e-01,
5.0146e-02, -1.8844e-02, 3.6675e-01,
-4.0389e-03, 3.1596e-01, 3.6771e-03,
-2.2256e-40, 1.4272e-40, -2.0732e-40,
5.5913e-40, -6.0538e-40, 1.2791e-40,
4.5825e-41, 4.1080e-41, -1.8211e-40,
2.2687e-01, -5.8992e-02, 4.7796e-03,
6.0603e-01, 2.7961e-01, 1.5973e-02,
2.3035e-01, 1.3031e-01, -9.9280e-03,
-4.7235e-02, 5.1773e-02, -4.8586e-02,
-1.4510e-01, -1.7336e-01, 1.0981e-01,
-2.0303e-01, -1.6008e-02, -1.8524e-03,
-2.3440e-01, -3.2373e-02, -6.7911e-02,
-1.6256e-01, 1.2316e-01, 2.7859e-02,
8.5089e-04, -3.7401e-02, -1.8672e-02,
-1.0418e-01, -7.8407e-02, -1.8413e-02,
8.2834e-02, 2.3128e-01, 3.2983e-02,
3.1099e-02, -6.4485e-02, -8.1659e-02,
1.9152e-01, -1.9609e-02, 2.7364e-02,
1.0458e-02, -1.2507e-01, 4.1334e-02,
-4.6215e-02, 5.6944e-02, 2.1477e-02,
-1.4934e-01, -6.8383e-02, 2.7957e-02,
-3.6846e-01, 4.8766e-01, 6.4000e-02,
-3.9621e-02, -8.1667e-03, 4.5997e-02,
-6.1391e-02, 1.2976e-02, -3.2152e-02,
7.5767e-02, 1.2931e-01, -2.3498e-02,
4.0320e-02, 1.3876e-02, 1.1022e-02,
-6.2401e-41, 5.8564e-40, 3.9473e-40,
-5.6890e-40, -2.6022e-40, -2.9841e-40,
-4.2456e-40, -1.1546e-40, 4.4955e-40,
-4.2969e-02, -1.0995e-01, 1.3021e-01,
1.0142e-01, 5.2225e-01, -5.5486e-02,
-7.2349e-02, 8.5470e-02, 2.3438e-02,
-1.0690e-01, -1.4370e-01, -1.2632e-01,
2.8754e-02, 1.1662e-01, 5.6515e-02,
-1.5726e-01, -1.4945e-01, -4.4956e-02,
1.6574e-01, -5.6894e-02, -2.0851e-01,
8.1498e-03, -2.5441e-01, -1.4412e-01,
-1.0959e-02, -2.5811e-02, 8.8934e-02,
6.3594e-02, -9.3314e-02, 7.8247e-02,
4.6795e-02, -2.2774e-01, 7.1041e-02,
1.4830e-01, 1.9911e-01, 5.1978e-02,
7.4936e-02, 2.3104e-02, 6.3928e-02,
-1.3118e-02, 6.7544e-02, 7.9514e-02,
2.2335e-02, -9.9442e-02, 6.8070e-03,
2.4395e-02, -3.3576e-02, 5.5508e-02,
-4.0872e-02, 5.4501e-02, -5.7051e-02,
8.6621e-03, -1.5361e-01, 1.2630e-01,
-2.2344e-01, 1.3335e-01, -1.1688e-01,
-2.4232e-01, 3.3319e-01, -1.2580e-01,
-2.2169e-02, 2.0594e-01, 2.6521e-02,
4.1883e-40, -3.4540e-40, 4.9152e-40,
-1.5711e-40, 3.3927e-40, -5.5069e-40,
5.5831e-40, -5.2011e-41, 1.0351e-40,
1.7989e-01, 2.3787e-02, 5.7447e-03,
4.8748e-01, 3.0152e-01, 3.5517e-02,
2.2155e-01, 1.8812e-01, 3.0994e-02,
7.8657e-02, -7.1135e-02, -5.8293e-02,
-1.4220e-01, 1.6004e-02, -2.5180e-02,
-1.6811e-01, -2.3441e-01, 1.4810e-02,
5.3140e-02, -1.2904e-01, -1.5105e-02,
5.4525e-02, -1.5418e-01, 6.6507e-02,
8.3947e-02, -1.1975e-01, 5.3902e-02,
8.0834e-02, -2.4321e-01, -1.0282e-03,
3.1276e-03, 3.2495e-01, -1.3238e-02,
4.5285e-02, 5.8777e-02, -1.3231e-01,
-6.0928e-03, 8.7145e-02, 6.2031e-02,
-5.3919e-01, -6.8810e-02, -1.0755e-01,
-2.2571e-02, 2.6237e-02, -6.8731e-03,
-6.6771e-02, -2.0586e-01, 4.7722e-02,
-3.4968e-01, 3.0912e-01, 2.4487e-01,
-4.9537e-02, -5.2779e-04, 6.7840e-02,
1.7583e-02, 3.3222e-02, -5.7070e-02,
-2.3250e-01, 1.4470e-01, -4.9895e-02,
3.3147e-02, 8.6319e-02, 4.4719e-02,
-6.9454e-41, 2.0308e-40, -1.1977e-40,
5.9045e-40, -2.6129e-40, 4.8298e-40,
4.7288e-40, 6.0736e-40, 2.2462e-40,
-4.0294e-02, -9.1437e-03, -2.4926e-02,
-2.1269e-01, 1.1602e-01, 1.4383e-02,
5.1456e-02, 6.9047e-02, 1.6519e-02,
6.3737e-02, -9.0181e-02, 7.0716e-02,
7.0061e-02, 7.9046e-02, -4.3925e-02,
7.4396e-02, -5.2797e-02, 3.8125e-02,
7.5999e-02, -5.1307e-02, 2.4326e-03,
-3.1716e-02, -1.2567e-01, -3.3898e-02,
8.4925e-02, -5.2404e-02, 2.8535e-02,
9.6844e-03, 4.6980e-02, 3.8552e-02,
-5.7110e-02, 3.2163e-02, 1.5219e-02,
6.6905e-02, -2.7934e-02, 1.4184e-03,
-2.4239e-02, -8.6317e-03, -2.3295e-03,
-2.3065e-02, 1.0076e-01, 2.1562e-03,
-1.3647e-02, -3.4262e-02, 2.5777e-02,
7.6601e-02, 1.3654e-01, 2.1458e-03,
1.4542e-01, 3.6310e-01, 1.6266e-01,
-5.8465e-02, 4.3751e-02, 1.9227e-02,
9.1783e-03, -5.9547e-02, -1.8234e-02,
-5.3399e-02, 1.9218e-01, -4.6238e-02,
-1.9052e-01, 1.4635e-02, 2.9536e-02,
1.4621e-40, -5.5132e-40, -4.6215e-40,
4.3948e-40, -2.7285e-40, -5.5709e-40,
1.9428e-41, -4.0333e-40, -5.4469e-40,
9.3126e-02, -1.3236e-01, 9.9350e-02,
-1.3308e-01, 3.5030e-01, 9.2221e-02,
1.1783e-01, 1.6648e-01, -7.9150e-02,
2.2654e-01, -1.2546e-01, -1.2354e-01,
-1.6457e-01, -6.0740e-02, -3.1069e-02,
-8.3203e-02, -1.8064e-01, 4.6900e-02,
1.2059e-01, -1.0569e-01, -7.1196e-02,
-9.2991e-02, -1.7587e-01, 1.3100e-03,
-1.5492e-01, -1.3849e-01, 1.2245e-01,
-5.5276e-02, -9.7867e-02, 3.5550e-02,
-6.0264e-02, 4.7760e-02, 6.0242e-02,
-5.4096e-03, 2.4646e-01, 6.3592e-01,
5.8559e-02, 6.1117e-02, 8.0334e-02,
-4.4582e-03, -1.2028e-01, 8.7394e-02,
-2.5880e-02, -1.2206e-01, 1.2199e-01,
4.1990e-02, -1.3283e-01, 4.9047e-02,
-4.9532e-02, 2.7688e-01, -4.6064e-03,
-2.8812e-03, -2.4404e-01, 5.8614e-02,
-1.4262e-01, -1.2810e-03, -1.2060e-01,
-8.3595e-02, 5.6532e-02, -7.7556e-02,
-1.3364e-01, -1.3883e-01, -1.2335e-01,
-1.3273e-40, 6.5184e-41, -4.6946e-40,
-4.0031e-40, -1.2807e-40, -3.1584e-40,
1.3009e-40, 2.4187e-40, -1.4202e-40,
-8.8844e-03, 1.0101e-03, -6.0190e-02,
-1.8851e-01, -7.6662e-02, -1.4562e-01,
2.9983e-02, -8.1533e-02, 1.1256e-02,
1.0205e-01, 6.7850e-02, -1.0911e-01,
-1.2846e-01, -5.4605e-02, 6.2182e-02,
-1.0797e-01, -5.1281e-02, -1.2036e-02,
-8.1693e-02, -7.0432e-02, 1.6990e-01,
-1.7329e-01, -2.2084e-01, -3.0977e-02,
8.2771e-02, -3.3089e-01, -1.4842e-01,
1.9576e-02, -1.5953e-01, -1.0348e-01,
6.6014e-02, 6.0094e-01, -6.9891e-04,
7.4969e-02, -1.4250e-01, 4.3221e-02,
1.6796e-02, -6.8125e-03, 4.7028e-02,
-3.3421e-01, -2.2987e-01, 4.2936e-02,
9.3985e-04, 9.0827e-02, 2.4211e-01,
-8.1571e-02, -1.0276e-01, 1.9092e-01,
2.1112e-01, 2.6837e-02, -2.5822e-01,
-1.3290e-01, 1.6135e-01, -2.7672e-02,
3.4465e-01, -8.3286e-03, -6.1936e-02,
2.7406e-01, -6.8357e-02, 1.7426e-01,
-9.0872e-02, 1.2999e-01, 7.2366e-02,
3.0944e-40, -1.2808e-40, 2.9336e-40,
5.5561e-42, 3.0978e-40, 1.0027e-40,
-1.5881e-40, -2.9858e-40, 3.1599e-41,
-9.1935e-02, -2.2666e-04, -6.2821e-02,
-1.8605e-01, 3.0238e-01, 3.2759e-02,
-5.0771e-02, 1.4585e-02, -1.0872e-01,
2.5511e-02, -9.3394e-02, 1.4810e-02,
-6.2906e-02, 9.2472e-02, 1.2845e-02,
-2.9041e-01, -9.6489e-03, -2.7277e-02,
-6.9896e-02, -1.1645e-01, -5.9870e-02,
-2.8037e-02, -2.2649e-01, 5.1781e-02,
-1.4588e-02, 4.8753e-02, -2.8256e-02,
-1.6462e-02, 8.0795e-02, 3.6222e-02,
8.0392e-02, 3.0118e-01, 2.0021e-01,
1.0394e-01, 6.4196e-01, 4.9545e-01,
2.1242e-02, -1.2514e-01, 1.0066e-01,
-4.7676e-02, -2.0736e-02, -5.6951e-03,
-8.3021e-02, 4.6763e-02, 1.7551e-01,
2.0038e-02, 1.8084e-01, 1.3244e-02,
1.0280e-02, 2.8740e-01, 8.9837e-03,
-2.9437e-02, -3.7366e-01, -1.1861e-01,
-4.8248e-03, -1.2970e-01, -1.8680e-02,
1.8458e-01, 5.6509e-02, 1.2734e-01,
1.9423e-01, -3.6960e-01, -2.5555e-02,
6.7959e-41, -3.2251e-40, -3.0631e-40,
-4.0701e-40, 9.7399e-41, 2.2917e-40,
2.0169e-40, 5.7891e-40, -4.1286e-40
}
,
{
5.6253e-02, 1.0118e-02, -8.2749e-02,
-6.4074e-02, 4.0723e-02, 1.1657e-02,
-1.1560e-01, -3.5596e-03, -2.6713e-02,
-7.9090e-02, -2.9223e-01, 1.5759e-01,
6.8756e-02, 1.5738e-01, 1.5413e-01,
-6.1288e-02, -1.2536e-01, -1.5966e-01,
1.1165e-01, 5.0211e-02, -1.0338e-01,
-5.2364e-04, 1.7660e-01, -2.2504e-03,
-1.7697e-01, 1.8500e-02, 2.0693e-02,
-2.5907e-02, -1.4201e-01, 8.4467e-02,
1.1138e-02, 2.1769e-01, -4.2422e-01,
6.5046e-02, 2.6834e-02, 2.9047e-03,
-1.2130e-01, -5.1773e-01, -8.0393e-02,
3.0204e-02, 3.5952e-01, 1.6681e-01,
-9.4720e-04, 7.7291e-02, 8.3039e-02,
3.4689e-01, -1.2389e-01, -2.0666e-01,
-2.9650e-02, 1.1102e-01, -1.4782e-01,
3.2193e-02, -3.9862e-02, 1.6440e-02,
-8.4264e-02, 1.0192e-01, -6.4256e-02,
2.2950e-02, -6.6511e-02, -6.3814e-02,
4.3744e-02, -1.0557e-01, -1.2045e-02,
1.6330e-01, 6.6130e-01, 1.5497e-01,
1.7103e-01, 1.5073e-01, 1.7400e-01,
9.0985e-04, 1.0917e-02, -1.3322e-02,
-6.4273e-02, -6.2178e-02, -7.7223e-02,
-1.0332e-01, -2.1072e-01, -2.2843e-03,
3.2717e-02, -6.3754e-02, 5.0359e-02,
-5.2566e-02, 6.2090e-02, -1.5614e-02,
1.4570e-02, -1.0243e-01, 1.3091e-01,
-2.9988e-02, -7.5897e-02, -9.4541e-04,
-2.7999e-01, -4.7415e-03, 5.6419e-02,
7.0565e-02, -4.9273e-01, -1.2936e-01,
5.5685e-02, -5.8924e-03, -3.1967e-02,
8.8602e-02, 2.9337e-01, 1.3753e-01,
1.0063e-02, 1.6348e-02, 1.0063e-01,
3.6230e-02, 1.7968e-02, -1.1624e-01,
-2.2488e-02, 1.3474e-01, -1.1419e-01,
2.8576e-02, -7.4794e-02, -7.7261e-02,
5.8874e-02, -2.9448e-03, 6.0207e-02,
1.4642e-01, 1.2321e-01, -2.4936e-01,
2.2609e-02, -2.8171e-01, 1.1510e-01,
2.6056e-02, -2.7532e-02, -4.7505e-02,
-2.8762e-02, -1.2610e-02, -8.3766e-02,
-5.0992e-02, -5.7269e-03, -7.0981e-02,
-9.6191e-02, -9.2384e-02, -5.3328e-02,
2.3989e-01, 3.9819e-01, 1.8451e-01,
3.6888e-02, 1.1023e-01, 4.4804e-03,
-4.4140e-03, -4.8275e-03, 2.0018e-02,
-2.4346e-02, -6.5546e-02, -4.6065e-03,
2.2298e-01, 2.8810e-01, 1.4071e-02,
-1.7315e-01, -5.7961e-02, -9.9136e-02,
3.6456e-02, -1.5518e-02, 6.4490e-02,
4.6983e-02, 5.2743e-02, 3.0802e-01,
6.7940e-02, 5.8777e-03, 3.1155e-01,
9.9510e-02, 2.7974e-02, -6.6716e-02,
3.7042e-01, 2.0813e-01, -3.1581e-02,
7.9064e-02, -1.3699e-01, -4.4722e-02,
-8.4753e-03, 8.0676e-02, 1.5771e-01,
-1.1467e-01, 5.6269e-02, 1.1369e-01,
-1.4727e-02, 3.7263e-02, -2.0554e-01,
8.3383e-02, 4.5848e-02, -1.1732e-02,
4.5494e-02, -2.1406e-01, 6.0591e-02,
4.6503e-02, -1.0362e-01, 3.8794e-02,
-4.6633e-01, 1.4504e-01, 1.4999e-01,
2.9642e-01, -4.8807e-01, -1.6012e-01,
1.6708e-01, 9.5313e-02, -7.5981e-02,
-4.2655e-02, 9.2470e-02, -7.7242e-02,
-2.1021e-01, 1.2423e-01, 1.4967e-02,
-5.4129e-02, 7.4355e-02, -4.7068e-02,
-1.6048e-01, 9.8742e-02, 4.4282e-02,
-6.0187e-02, 1.9495e-01, 8.3291e-02,
-7.5190e-02, -6.8429e-02, 3.7391e-02,
5.1413e-04, 1.5098e-01, -1.1549e-01,
1.6875e-01, 1.8040e-01, -1.3162e-01,
7.7101e-02, 2.0816e-01, 7.6289e-02,
-1.7528e-02, 1.4408e-02, 3.7500e-02,
3.8647e-02, 1.6850e-01, 1.7535e-02,
-2.8205e-02, 1.0273e-02, 1.6688e-01,
4.3676e-02, 6.9895e-02, 8.1063e-03,
-2.6117e-01, -1.0920e-01, 5.2209e-02,
-5.2749e-02, -1.7062e-02, -9.6808e-02,
2.7324e-02, 9.1342e-02, -5.0968e-02,
1.0689e-01, 5.0565e-01, 4.6004e-01,
-6.6862e-03, 3.4162e-03, 3.3559e-01,
3.5084e-02, 1.9123e-02, 1.0073e-02,
1.6995e-01, 3.4099e-01, -4.0847e-01,
-5.5317e-03, 4.0230e-02, -2.0305e-01,
-8.9786e-02, 1.9667e-01, 3.8111e-02,
3.0607e-02, -1.9084e-02, -6.5114e-02,
8.5394e-02, -1.3992e-01, 1.4988e-02,
-1.5926e-02, -9.1200e-03, -7.2328e-02,
1.3548e-01, 7.1040e-01, -9.4208e-02,
2.5411e-03, -7.2159e-02, 1.0848e-01,
-8.9029e-02, -8.6339e-02, -2.7546e-02,
6.0378e-02, 2.8401e-01, -6.6550e-02,
-3.0486e-02, 5.0307e-02, -1.1084e-02,
2.9732e-02, 9.9960e-02, -7.7408e-02,
3.4940e-01, -5.6048e-01, 2.9053e-02,
-2.6991e-02, 4.9637e-02, -3.9322e-02,
-1.0418e-02, 1.0931e-01, -6.1609e-02,
3.6057e-02, 9.3866e-02, -1.0339e-01,
-1.8572e-02, -2.0889e-02, -7.4531e-02,
-7.3236e-02, -4.5908e-02, 2.2705e-02,
-1.5148e-02, 2.1735e-01, 2.2477e-02,
-3.4153e-02, -2.6939e-02, -5.0167e-03,
6.6774e-02, 2.0168e-01, -7.5083e-02,
5.6608e-02, 2.2799e-01, -3.7473e-01,
-7.2336e-02, 4.4329e-02, -3.6747e-02,
3.5355e-02, 1.8671e-01, -4.0167e-02,
1.2871e-01, 3.5050e-01, 1.8090e-01,
-6.2429e-02, 6.2184e-02, 6.8804e-02,
-8.0164e-02, -2.4387e-02, -5.0309e-03,
1.0089e-01, -3.0008e-02, 1.7251e-02,
-9.4662e-03, -1.4760e-02, 7.3434e-03,
7.3290e-02, 2.2546e-02, -2.9015e-02,
7.9944e-02, -2.6972e-01, 7.1349e-02,
-1.7026e-02, 1.1461e-01, -4.1288e-02,
-5.3732e-02, -2.4618e-01, -1.2890e-02,
8.6133e-02, 1.9503e-01, 8.2202e-02,
-1.0060e-03, -4.5931e-04, -1.8789e-02,
-4.0843e-02, -7.8149e-03, -6.1464e-02,
-7.9364e-02, -5.9647e-02, -5.4059e-03,
1.9553e-01, -2.4079e-01, -7.9538e-03,
5.3620e-02, 1.4198e-01, 6.5651e-03,
2.3512e-02, -2.6609e-02, -4.6435e-02,
1.2499e-02, 5.1079e-02, -2.2713e-02,
-7.1554e-02, 1.0608e-01, 5.8972e-02,
1.8638e-01, -2.1053e-01, -6.4009e-02,
1.0851e-01, 7.2187e-02, 8.9722e-02,
-4.5365e-04, 1.0826e-01, -6.4141e-02,
-2.3874e-02, -4.6307e-02, -2.7813e-02,
1.8385e-02, 9.4687e-02, 6.8374e-02,
9.4526e-02, 1.4432e-02, 1.5937e-01,
1.1292e-01, -3.4274e-01, -1.0813e-01,
-7.4636e-03, 3.7101e-02, 3.7226e-02,
3.7079e-02, -3.9169e-02, -3.7752e-02,
-7.9021e-02, 8.5978e-02, 1.0958e-02,
-5.8576e-02, 5.5931e-02, 4.8301e-02,
-1.3402e-01, -3.3809e-01, -4.4369e-02,
1.4262e-01, 6.5254e-02, -3.3366e-01,
1.2416e-02, -9.0492e-02, -5.8205e-02,
-1.4886e-01, 4.0598e-02, -1.4219e-01,
2.0223e-03, -2.8673e-01, -3.3622e-01,
1.9191e-02, -2.2104e-02, 1.9048e-02,
6.0021e-02, 2.2520e-01, -5.3972e-02,
1.6226e-01, -2.1918e-01, -5.2117e-02,
-6.2363e-03, 2.0266e-01, -7.3323e-03,
1.1137e-01, -1.9300e-02, -5.4983e-02,
-1.8338e-01, 6.2511e-01, -1.7909e-01,
1.7003e-01, 1.7902e-01, 5.4462e-02,
5.6847e-02, -7.4696e-02, -1.1354e-02,
1.0544e-01, -1.4918e-01, 4.8208e-02,
-5.6262e-02, -2.3303e-01, -2.9916e-02,
-3.3261e-02, 1.3287e-01, 1.9831e-02,
-1.3907e-01, -1.6180e-01, -7.2323e-03,
-5.1689e-02, 6.3121e-02, -1.4480e-01,
1.1143e-01, 4.9625e-02, -5.4369e-02,
-3.9247e-01, 2.3412e-01, -3.6726e-02,
-1.1468e-02, 3.4045e-02, 6.6454e-02,
-5.0103e-02, 6.1740e-02, 4.2922e-03,
1.7669e-01, -8.1250e-03, 6.3694e-03,
-6.7723e-02, 7.4576e-02, 1.0113e-02,
1.1264e-01, -4.4691e-02, -5.3575e-02,
3.4691e-02, -1.2201e-02, -8.4221e-02,
2.3677e-01, 3.9073e-01, 2.4710e-02,
-8.4580e-02, -1.0747e-01, -6.5695e-02,
1.5386e-01, 1.4041e-01, 6.9961e-03,
2.6138e-02, 2.3149e-02, -1.8820e-02,
-3.3541e-02, 3.2089e-02, -1.8916e-02,
1.0564e-01, -7.5319e-02, -5.4282e-02,
-6.9388e-03, -2.0873e-02, 5.6100e-02,
2.3524e-02, -6.4296e-02, 5.8950e-02,
-3.1415e-03, -4.1203e-02, 1.0781e-01,
1.7848e-02, -2.9535e-02, -1.6412e-02,
-4.6649e-02, 8.1277e-02, -5.9918e-02,
8.1522e-02, -9.2037e-02, 8.1039e-03,
-6.5541e-02, 5.1811e-02, -1.4380e-03,
5.0419e-02, 9.3091e-03, -2.8054e-02,
-3.0979e-02, -2.5366e-02, 3.5265e-02,
-3.7730e-02, 5.7574e-02, 3.4683e-02,
4.8819e-03, -2.9519e-02, 3.7740e-02,
6.4546e-02, -3.7272e-01, -8.5393e-02,
-3.0223e-02, -7.7899e-02, 2.7365e-03,
2.2282e-02, -3.3440e-02, 1.9048e-02,
2.3275e-02, -2.1153e-02, -2.0385e-02,
-4.6245e-02, 2.2443e-02, -3.0206e-02,
-2.5302e-02, -1.1418e-02, 4.8228e-02,
5.8367e-02, -4.3062e-02, 2.2814e-02,
-4.6279e-02, 5.0052e-02, 2.2961e-02,
-5.4984e-02, 1.4773e-01, -2.5546e-02,
3.3025e-02, -1.0138e-01, 6.3886e-02,
1.2403e-02, 1.6215e-02, 1.0783e-02
}
,
{
2.5042e-02, -5.3266e-02, 3.8484e-02,
3.7189e-03, 1.0493e-01, 1.4459e-01,
-3.7442e-02, -1.5744e-01, 1.9957e-01,
-1.9203e-02, 1.6256e-02, 4.2906e-03,
-3.1637e-02, 5.0287e-01, -6.9504e-02,
1.4677e-03, -8.9984e-02, -9.0376e-02,
4.0578e-02, 2.4004e-02, 3.4044e-03,
7.5916e-02, -1.3564e-01, -9.0296e-02,
3.4156e-02, 7.2494e-02, -2.0037e-02,
-6.4614e-02, -1.7301e-03, -3.3444e-02,
-2.7950e-01, 7.1351e-01, 4.2825e-02,
2.4797e-02, 5.4162e-04, -8.9676e-02,
3.8002e-02, -2.7692e-02, -1.7757e-02,
1.9356e-01, 1.9598e-02, -1.0862e-01,
2.5734e-02, 1.1703e-02, -7.3912e-02,
-6.0213e-04, 1.6024e-01, -6.4591e-03,
3.1779e-02, -3.1049e-01, 1.2684e-02,
-1.0098e-01, -1.8839e-01, 5.1387e-02,
5.2004e-02, 3.1489e-01, 5.9716e-01,
-7.2238e-02, 3.4332e-01, -2.0655e-01,
1.1013e-03, -5.0328e-02, -4.6118e-02,
9.4442e-04, 2.7964e-02, 1.7672e-02,
-8.6022e-02, -3.8280e-02, 2.8017e-04,
3.3824e-02, -6.7883e-02, 1.0529e-02,
-6.5982e-02, 1.1385e-01, 3.0091e-03,
1.2330e-01, 6.1876e-01, 5.7145e-02,
-4.3835e-02, -6.8186e-01, -1.0917e-01,
3.2006e-02, -2.0627e-03, -6.9043e-02,
7.2219e-02, -3.2393e-01, -2.6657e-02,
1.3523e-02, 1.8099e-01, 4.9168e-02,
7.1367e-02, 9.8283e-02, 1.0425e-01,
2.2286e-01, -5.9374e-01, 1.0014e-01,
6.5700e-02, 1.3618e-02, -7.4045e-02,
1.0481e-01, 3.0734e-02, 1.0431e-02,
-2.1314e-01, -7.2817e-02, 1.2036e-01,
-5.4180e-02, 1.0500e-01, 2.7821e-02,
-5.0657e-02, 8.7702e-02, 7.0234e-02,
9.0349e-02, 1.4905e-01, 1.1612e-01,
5.9924e-02, 2.4928e-01, 1.7078e-01,
-5.9110e-02, -7.4252e-02, 9.8241e-03,
-1.2006e-01, 1.3879e-01, -1.4322e-02,
-7.5463e-02, 1.4407e-02, -6.9202e-03,
7.0279e-02, 1.7065e-01, -2.5150e-01,
-2.6289e-02, 3.8421e-01, -2.2051e-01,
-2.8918e-02, 4.0074e-02, -7.1296e-02,
1.0357e-01, -1.8885e-01, 2.3780e-02,
-1.8884e-01, -4.3326e-01, -1.1465e-01,
3.3497e-02, -1.3462e-01, -3.4127e-02,
-1.2731e-02, 5.4326e-02, -2.6581e-02,
5.1753e-02, 6.8200e-03, 4.3246e-03,
-6.9963e-02, -1.5618e-01, 2.5192e-01,
2.2890e-02, 6.1421e-02, 5.2832e-02,
-9.8369e-02, -1.1452e-01, 1.7420e-01,
2.0392e-01, -1.1322e-01, 9.8462e-02,
-3.3547e-02, -2.8993e-01, 7.0080e-02,
8.2478e-02, -1.9881e-01, 1.2849e-01,
-2.7802e-01, -1.5621e-01, 6.2712e-02,
1.3028e-02, 1.4716e-01, 2.0434e-02,
-4.4071e-01, 3.8359e-01, -1.6655e-03,
-2.0297e-01, 1.5631e-01, 7.7086e-02,
9.6714e-03, -5.5842e-03, 7.9155e-03,
1.4525e-01, -3.2228e-01, 1.1454e-01,
1.4527e-01, -3.0399e-02, -6.7043e-02,
9.4233e-03, -1.1296e-02, -1.0927e-01,
7.9300e-02, 5.5286e-02, -1.1558e-01,
3.8173e-01, -5.4351e-02, -1.7890e-01,
5.4882e-02, 1.5119e-01, 1.8363e-01,
-8.8223e-02, -9.0083e-02, 4.8221e-01,
4.0890e-02, 5.6429e-02, -2.8538e-01,
1.2102e-02, -1.8177e-02, -3.1643e-03,
-6.9064e-02, 3.1853e-04, -7.0113e-02,
9.7308e-02, 1.0691e-01, -6.5919e-02,
-1.4536e-40, -1.7049e-40, -2.6781e-40,
4.5792e-40, 1.4489e-40, 1.3645e-40,
-5.8774e-40, -2.2505e-40, -4.7571e-40,
3.3670e-40, 1.5398e-40, -3.3819e-40,
2.6303e-40, -1.9434e-40, -5.5555e-40,
-4.3830e-40, -2.8750e-40, -3.0788e-41,
5.6364e-40, 3.1307e-40, -2.3064e-41,
2.8909e-40, -5.8115e-40, 2.9852e-41,
-1.9273e-40, -7.5503e-41, -6.0335e-40,
5.8073e-40, 2.9252e-40, -1.3038e-40,
5.2260e-40, 3.8172e-40, -2.0389e-40,
-2.1905e-41, 1.8473e-40, -2.9226e-40,
2.9957e-41, 2.6068e-40, 6.1324e-40,
-4.3013e-41, 5.1421e-40, -4.1157e-40,
2.1416e-41, -1.6614e-40, -3.0843e-42,
-4.3402e-40, 2.8507e-40, 1.1560e-40,
3.8826e-40, -3.0797e-40, -6.0685e-40,
5.4170e-40, -6.1858e-40, 9.3049e-41,
-1.9491e-40, -1.9211e-40, -6.2723e-40,
3.9906e-40, 1.2356e-40, 3.8682e-40,
2.8630e-40, 6.2303e-40, 5.3034e-40,
-4.1904e-40, 4.8916e-40, -3.6125e-40,
-5.5393e-40, -2.4980e-40, -6.1877e-40,
2.7289e-40, -1.8348e-40, -5.6663e-40,
2.5152e-02, -3.2878e-02, 2.1626e-02,
1.9879e-01, 2.9080e-02, -3.0331e-03,
-2.3380e-01, -2.3578e-02, 1.1871e-01,
-3.1824e-02, -5.5095e-02, 3.1338e-02,
-3.2199e-02, -4.3820e-01, 4.1391e-02,
-4.1207e-02, 3.7475e-01, -1.8548e-01,
-1.4460e-02, -8.7834e-02, -3.2343e-02,
2.4023e-01, 7.1916e-01, -1.8559e-01,
-6.7635e-03, -9.4409e-02, -1.7890e-02,
-5.8334e-02, 1.8886e-01, 6.1547e-02,
-2.6152e-01, 6.6722e-01, -1.2486e-01,
-4.8128e-02, 1.0510e-01, -4.2619e-02,
3.0101e-03, 9.6380e-02, 6.6140e-02,
1.0201e-01, -2.3240e-01, -1.8356e-01,
4.0019e-02, 2.2985e-01, -1.2980e-01,
-1.1400e-01, -1.9221e-01, -3.4158e-02,
2.2871e-02, -6.8684e-01, -1.0856e-02,
2.6311e-02, 2.5422e-02, -1.5190e-02,
3.2182e-02, -5.6346e-02, 3.2655e-02,
-1.6912e-02, 8.4264e-02, -7.9521e-02,
1.2788e-03, -7.1110e-02, 8.6585e-02,
-4.2829e-02, 1.0778e-01, -6.8129e-02,
5.8156e-03, -2.3998e-01, 1.9052e-01,
-4.1855e-02, 1.0140e-01, -1.7139e-02,
5.2301e-40, -2.9923e-40, 3.8688e-41,
3.1575e-40, 1.1504e-40, 5.5655e-40,
-3.4499e-40, 2.3050e-40, -6.3766e-41,
1.3282e-40, 4.5849e-40, 3.5308e-40,
-2.6657e-41, 5.9829e-40, 3.2791e-40,
-2.8348e-40, 2.5810e-40, 5.5791e-40,
4.2613e-40, 3.2607e-40, -2.0789e-40,
-3.9054e-40, -2.5608e-40, -2.7638e-40,
4.5027e-40, 2.7065e-40, -4.5593e-40,
1.6336e-40, -2.0391e-40, -5.9017e-41,
-7.9899e-41, -2.9870e-40, 5.6390e-40,
-2.5560e-41, -1.9786e-40, 9.4700e-41,
-7.4049e-41, -2.3902e-40, -2.8497e-40,
-1.8912e-40, -1.5589e-40, 5.5463e-40,
-2.1782e-40, -1.9532e-40, -2.3785e-40,
2.7539e-40, 4.0214e-40, 2.0732e-40,
7.0120e-41, -4.4200e-40, 7.3787e-41,
2.6452e-40, 1.1970e-40, 2.8298e-40,
5.2721e-40, 1.9304e-40, -3.8489e-40,
-3.9759e-40, 2.6184e-40, 1.2594e-40,
1.5831e-40, 3.7179e-40, -3.4915e-40,
-1.7681e-40, -6.9657e-41, -4.0746e-40,
8.0894e-41, 1.6950e-40, -1.0574e-40,
-1.0590e-40, 2.8466e-41, -2.7558e-40,
-5.4027e-40, 4.4355e-41, -3.2144e-40,
-4.8838e-41, -3.8595e-40, 2.5064e-40,
4.0365e-40, -1.0195e-40, 4.8356e-40,
4.4499e-40, -4.4871e-40, -2.4561e-40,
4.1687e-40, 5.2239e-40, -5.7603e-41,
-1.5211e-40, -3.5768e-40, 3.6385e-40,
1.6089e-40, 4.1624e-40, 4.5114e-40,
1.6438e-40, -3.6331e-40, 6.4961e-41,
5.0899e-40, 6.1036e-40, 2.4828e-40,
5.8681e-40, -5.7259e-40, -1.5371e-40,
5.2654e-40, 4.7412e-40, -2.0265e-40,
-4.8621e-41, 4.9497e-40, 3.0176e-40,
4.2235e-40, 4.5381e-40, 4.6501e-40,
-1.6124e-40, -1.9449e-40, 5.1497e-40,
-1.2891e-40, -1.6549e-40, 4.8348e-40,
-2.0735e-40, 1.3423e-41, -4.4109e-40,
-5.4218e-40, -1.1537e-40, -1.1664e-40,
5.6006e-40, 3.4109e-40, -3.1434e-40,
3.4969e-40, -5.3459e-40, 3.9245e-41,
2.4028e-40, 5.7774e-40, -6.2973e-40,
1.8802e-40, -4.6258e-41, -5.0716e-40,
3.4962e-40, -6.2313e-41, -2.7290e-40,
-5.2709e-40, -3.2225e-40, 2.4245e-40,
-3.6300e-40, -2.0794e-40, 4.0541e-40,
-3.5157e-02, 6.8337e-02, 1.6149e-02,
-5.8650e-03, 6.0605e-01, 3.1738e-02,
9.3306e-02, 2.1499e-01, 1.3609e-01,
6.4043e-02, -1.0253e-02, -6.2813e-04,
4.6828e-02, -3.9619e-01, -9.2633e-03,
-8.1752e-02, 9.9083e-02, 4.4296e-03,
7.1594e-02, 3.9860e-02, 8.1088e-02,
1.7750e-01, -1.2381e-01, 1.4476e-01,
2.3416e-02, 1.2819e-01, 1.0816e-02,
5.5296e-02, 5.5199e-02, -2.1253e-02,
1.7214e-01, 2.0542e-01, -3.7859e-03,
1.2831e-01, 3.2087e-02, -5.1851e-02,
-2.3686e-02, 1.2271e-01, -1.6009e-02,
-2.0176e-01, 7.4757e-01, -3.4526e-02,
-4.7055e-02, -3.7099e-01, -1.9216e-01,
-8.8030e-02, -2.5853e-02, -1.7087e-02,
-2.0533e-01, 1.5214e-01, -1.8639e-03,
-1.1236e-01, -2.4612e-01, 6.3094e-02,
2.3829e-02, -5.0078e-03, 5.3854e-02,
-9.6934e-03, 3.7047e-02, 4.7325e-01,
5.6975e-03, -8.6108e-02, 6.5569e-02,
-3.9768e-03, 2.0580e-02, -4.1931e-02,
6.9577e-02, -1.0416e-01, -2.5037e-03,
-1.9198e-02, 6.2027e-02, -1.0833e-02
}
,
{
-5.3430e-40, 2.5717e-41, 5.7504e-40,
7.1679e-41, 6.2076e-40, -8.4201e-41,
-4.2111e-40, 3.4851e-40, 1.3009e-40,
3.3016e-40, -7.6473e-41, -1.8392e-40,
2.2773e-41, 1.2087e-40, 1.1565e-40,
6.5190e-41, 2.0075e-40, 2.5796e-40,
5.0575e-40, -2.6261e-40, -2.5486e-40,
-3.9886e-40, -6.0644e-40, 2.9264e-40,
8.9627e-41, -3.0550e-40, -2.3456e-40,
-4.8855e-40, -4.8867e-40, -5.0492e-40,
-1.0706e-40, 5.3827e-40, -1.6413e-40,
1.4714e-40, -3.4024e-40, -4.4881e-40,
3.2361e-40, 2.0858e-40, 3.8836e-40,
2.0949e-40, 5.9633e-40, -1.7878e-41,
-4.1980e-40, -4.4383e-40, 2.7859e-40,
7.0317e-42, -8.9973e-41, 5.8700e-41,
1.8411e-40, -3.6097e-42, 2.7362e-40,
5.4341e-40, 6.0305e-40, 5.9004e-40,
5.2692e-40, -6.3449e-41, 1.2075e-40,
7.5297e-41, 8.9267e-41, 4.9139e-40,
-1.4609e-40, 3.1821e-41, 2.3288e-40,
3.1748e-41, -3.8052e-40, -2.4322e-40,
-5.7959e-40, 6.1966e-40, 3.4964e-40,
-5.6776e-40, -6.8327e-41, -3.3777e-41,
-5.9108e-02, 3.5468e-02, -2.8772e-02,
6.8602e-01, 1.4232e-01, 1.1954e-02,
-3.8234e-02, 7.1837e-02, -1.8832e-02,
4.7972e-02, 1.1623e-02, -2.1687e-03,
-4.9744e-01, 2.7751e-01, 1.7862e-02,
7.4286e-02, 3.1309e-03, 1.1030e-03,
-6.1084e-01, -8.5679e-03, 9.4956e-03,
-4.5246e-01, -1.2126e-01, -3.7368e-02,
2.5624e-02, 1.2087e-02, -1.5431e-02,
6.0313e-40, 1.8404e-40, -7.2006e-41,
6.0697e-40, -9.1199e-41, 5.8965e-40,
5.4830e-40, 1.3014e-40, 1.5585e-41,
-3.6027e-02, -6.3004e-03, 1.5237e-02,
6.0743e-01, 9.2523e-02, -4.7370e-03,
3.4407e-02, -8.3823e-02, 1.6898e-02,
5.7527e-40, -5.0621e-40, -2.9035e-42,
3.8199e-40, -2.2913e-40, -5.0895e-40,
4.0079e-40, 5.1744e-40, -3.3006e-40,
6.1448e-40, 1.2347e-40, -3.1673e-40,
7.3214e-41, 5.2143e-40, -2.6071e-40,
1.6109e-40, -2.0298e-40, 9.5817e-41,
6.9876e-02, -2.9290e-02, 3.2294e-03,
-4.2632e-01, 1.5789e-01, 3.6809e-02,
2.1220e-02, 1.6531e-04, 6.8502e-03,
-6.5221e-02, 8.8059e-02, 5.7934e-03,
-1.7280e-01, 1.5303e-01, 1.7663e-01,
-1.2908e-01, -1.1749e-01, 5.7887e-02,
1.0685e-01, 2.2763e-01, 3.3796e-02,
1.7629e-01, 3.8882e-01, 6.3540e-02,
6.4707e-02, 1.0046e-01, -8.1911e-02,
-3.9718e-03, 4.6416e-02, 4.7357e-02,
7.3694e-02, -1.6444e-01, 2.4784e-02,
-3.0808e-03, 2.7399e-02, -2.9216e-04,
2.4428e-40, -3.0160e-40, 2.3184e-40,
-4.9114e-40, 5.6685e-40, -3.6020e-40,
2.2618e-40, -2.8145e-40, 2.1149e-40,
2.3559e-02, -8.6949e-02, -3.8350e-02,
-2.9547e-01, 7.0187e-01, -8.3979e-02,
-2.8576e-02, -1.6538e-01, -5.2465e-02,
-1.6016e-40, -1.4760e-40, -2.1977e-40,
4.3180e-40, 4.1724e-40, -1.2969e-40,
-1.3023e-40, -1.0095e-40, -1.5965e-40,
-4.0721e-40, -4.1747e-40, -4.3706e-40,
-4.2838e-40, -4.5507e-40, -4.6023e-40,
-3.7435e-40, -3.9889e-40, -4.2249e-40,
-1.2429e-01, -3.5062e-01, -1.1418e-01,
-4.0787e-02, 6.1690e-01, -1.0085e-01,
1.6098e-02, 8.5100e-02, -1.1621e-02,
3.0709e-40, -4.4880e-40, -2.7530e-41,
-1.2649e-40, -5.3936e-40, 5.0995e-41,
4.4003e-40, -2.1211e-40, -6.6422e-43,
-1.8989e-40, -3.6631e-40, 4.1392e-40,
-3.9057e-40, -5.5599e-40, 6.9979e-41,
3.8983e-40, 5.6737e-41, 2.3997e-40,
-9.4862e-41, 2.4256e-40, -3.7040e-40,
1.6374e-40, 3.5439e-42, -1.0385e-40,
3.6145e-40, -2.4342e-41, -3.0115e-40,
-6.0009e-40, -5.2386e-41, -1.2504e-40,
2.9237e-40, -1.2290e-40, -1.1502e-40,
-3.5887e-40, -6.1810e-40, -1.6289e-41,
2.5438e-41, 5.1229e-40, -2.4915e-40,
1.3516e-40, 3.3553e-40, 8.5831e-41,
-8.5122e-41, 3.7625e-41, 2.5507e-40,
-1.5828e-40, 2.1991e-40, -1.5628e-40,
-5.3110e-40, 5.1395e-40, -5.8162e-40,
-3.1571e-40, -5.5139e-40, 1.2299e-40,
4.8855e-40, -9.3940e-41, -6.2534e-40,
-3.3275e-40, -2.4982e-40, -1.2956e-40,
-6.0047e-40, -1.8712e-41, -7.3274e-42,
-2.8519e-40, 3.5541e-40, 2.4485e-40,
-8.1435e-41, -2.7091e-40, 7.1206e-41,
-5.9519e-41, -2.5552e-40, -3.6189e-40,
7.7038e-02, -1.6317e-02, -2.4118e-02,
-4.3086e-02, -2.1512e-01, 1.2288e-01,
1.8237e-01, -1.5438e-01, -1.1346e-01,
-4.6141e-02, -4.0750e-02, -5.6414e-04,
-1.5640e-01, -3.4506e-01, -1.4441e-02,
-2.0278e-01, -3.1403e-01, -6.2542e-02,
-1.9622e-02, 1.6348e-02, 6.9859e-03,
-9.3142e-02, 1.0368e-02, -5.6585e-02,
8.4213e-02, 1.0776e-01, -1.0315e-01,
8.7873e-41, -5.3947e-40, 1.1714e-40,
7.5534e-41, -1.1871e-40, -5.4012e-40,
3.8269e-41, -1.4913e-40, -3.1802e-40,
-3.4707e-02, 1.2518e-02, 9.4679e-03,
1.2254e-01, 1.9394e-01, 2.6530e-02,
2.2413e-01, -1.6298e-01, -6.1446e-02,
-1.1042e-42, -2.7255e-40, -5.5067e-40,
3.8272e-40, 4.9956e-40, -3.2074e-41,
2.8351e-40, 4.2501e-40, 3.9389e-41,
6.1941e-40, -4.8790e-40, -3.4137e-40,
2.2577e-40, -5.7183e-40, -8.6861e-41,
5.7021e-40, -3.2349e-40, 1.9655e-40,
9.1180e-02, 5.6665e-02, -6.5437e-04,
1.1759e-01, 2.7517e-01, 1.9143e-01,
9.7905e-02, 6.6707e-02, 8.6535e-02,
8.8717e-03, 3.0913e-02, 6.6909e-03,
-8.1791e-02, -4.7883e-01, 7.4920e-02,
4.5843e-01, -1.0410e-01, 1.6655e-01,
-4.7094e-03, 3.4769e-02, -1.3291e-02,
-8.5570e-03, -4.0038e-01, 1.8418e-01,
-1.4696e-01, 3.2279e-01, 2.5712e-02,
-2.6207e-01, -4.6150e-02, -6.4099e-02,
-3.2623e-01, -1.8984e-01, -5.7891e-02,
-2.2088e-01, -4.2042e-02, -2.5307e-02,
1.0260e-40, 5.0443e-40, 7.5150e-41,
1.4402e-40, -5.1952e-40, -5.3810e-40,
6.2240e-40, 1.8661e-40, -8.2983e-41,
7.1850e-02, 4.8770e-02, -1.5081e-02,
4.8072e-01, 2.5477e-01, 3.8197e-02,
2.6011e-01, 2.4610e-01, -3.6167e-02,
3.8901e-40, 1.6760e-41, 2.8471e-40,
3.1983e-40, 1.2460e-40, -4.3961e-40,
3.9187e-40, 2.7818e-40, -9.1501e-41,
-2.3320e-40, -1.9998e-40, -2.8132e-40,
-2.9552e-40, -3.9643e-40, -5.1375e-40,
-1.6686e-40, -5.3138e-40, -2.6988e-40,
2.5623e-02, 2.6942e-02, 2.4342e-02,
-9.9084e-02, 5.2974e-01, -6.7983e-02,
-2.2454e-01, 1.1507e-01, 2.0364e-02,
3.4852e-01, -3.1091e-01, 8.1154e-02,
-3.2205e-01, 1.7103e-01, 2.4162e-01,
-2.6892e-03, 2.4142e-02, 5.5540e-02,
-4.5753e-02, -5.0097e-01, 1.7503e-01,
1.4058e-01, 1.1311e-01, 1.5945e-01,
-5.3975e-02, 5.2326e-02, -6.2382e-02,
9.4114e-02, -5.6812e-01, -1.2081e-01,
-8.5809e-02, -9.8661e-03, -2.3064e-02,
-1.6453e-03, -1.8328e-02, 2.4282e-03,
1.5943e-40, 4.6894e-40, -6.2730e-40,
3.8054e-40, -3.7914e-41, -1.4429e-40,
1.6925e-40, 5.1566e-41, -1.7909e-40,
-3.7920e-02, 2.4698e-01, 5.0019e-02,
-1.4246e-02, 2.8739e-01, -5.4704e-02,
7.9436e-02, -2.7838e-02, -3.4191e-02,
-3.3565e-40, 2.1368e-40, 6.7346e-42,
5.6681e-40, -5.5776e-40, -2.7705e-40,
-2.2966e-40, 1.1692e-40, -2.5187e-40,
4.4806e-40, -4.8424e-40, -9.1436e-41,
-4.3250e-40, -2.0721e-40, -2.0050e-40,
-5.1061e-40, 2.6405e-40, -3.0913e-40,
-1.2078e-01, 3.1948e-01, 1.0082e-02,
-1.0781e-02, 8.0720e-02, -4.6330e-02,
-1.8084e-02, -2.2846e-02, -5.5861e-03,
-3.2400e-02, -1.7329e-01, -2.7995e-02,
-5.3680e-02, 4.1310e-01, -9.4691e-02,
7.6938e-02, -4.9596e-02, 1.9649e-01,
3.2594e-02, 1.1544e-01, -1.8501e-02,
7.0248e-02, -6.9838e-02, -5.4278e-02,
-2.9317e-02, -1.4890e-01, 7.8661e-02,
3.7685e-02, 5.9594e-02, 8.9527e-02,
2.2957e-01, -2.9681e-01, -1.6329e-01,
-1.3206e-01, -4.3808e-02, 3.8854e-02,
1.7529e-40, -3.8429e-41, 1.4443e-40,
-4.0829e-40, -2.5643e-40, -5.4821e-40,
1.6827e-40, -1.1628e-40, 2.2441e-40,
5.2451e-02, 1.0179e-01, 4.8487e-02,
-2.1020e-01, -4.4345e-01, -8.7642e-02,
7.0958e-02, 1.9934e-01, -2.1090e-02,
-3.0795e-41, 2.7921e-40, 2.8491e-40,
-2.1154e-40, 9.8876e-41, -8.8824e-41,
2.6552e-40, 2.5767e-40, -3.8369e-40,
6.1348e-40, -3.4170e-40, -1.7109e-40,
-3.3080e-40, 5.4199e-41, -1.7512e-40,
1.8363e-40, -4.4080e-40, -2.5508e-40,
-4.0716e-02, -2.8531e-01, 3.9981e-02,
2.2278e-02, 5.6661e-01, -8.3890e-02,
-7.7331e-02, -9.3843e-02, 1.5584e-02
}
,
{
-3.6751e-40, -5.4562e-41, 6.1860e-40,
8.9003e-41, 5.5262e-40, 3.9537e-40,
-2.1258e-42, -3.1069e-40, -7.6225e-41,
-1.2220e-02, -8.6886e-02, 1.0714e-02,
1.1656e-02, -7.3635e-02, 5.9427e-02,
4.8518e-03, 1.3543e-01, 1.4668e-02,
-1.7505e-02, -2.0691e-02, -1.4507e-02,
2.6157e-02, 7.4109e-02, 1.2822e-02,
-1.9737e-02, -4.9281e-02, 8.5962e-03,
5.6236e-40, 2.4616e-40, 1.6384e-40,
-3.9469e-40, -1.7094e-40, 1.9285e-40,
-1.3634e-40, -1.5785e-40, 6.4184e-41,
-1.2752e-02, 2.3150e-02, -5.3355e-03,
-5.9667e-02, -3.9580e-01, -7.0033e-02,
-2.2612e-02, 1.9176e-02, 1.0588e-02,
8.0027e-04, 3.2242e-01, -2.2566e-02,
8.7850e-03, -2.4025e-01, 4.6123e-02,
-1.9038e-02, -8.5750e-03, -4.8153e-03,
-1.3049e-03, -5.7771e-03, 9.6437e-03,
3.2477e-02, 2.4482e-01, 4.0580e-02,
1.3194e-02, -4.6602e-01, -6.6163e-02,
-1.0647e-01, 7.3328e-02, 2.5871e-02,
-7.0883e-02, -9.2725e-02, -1.5185e-02,
1.1804e-02, 1.7784e-03, -4.4099e-03,
-4.9226e-40, -1.3081e-40, -3.5969e-40,
4.3539e-40, -2.9631e-40, 2.3531e-41,
5.6191e-40, 6.1545e-41, -1.1112e-40,
-1.1880e-02, -3.1884e-02, -2.0850e-02,
-6.8633e-03, 1.6422e-01, 1.0281e+00,
3.5887e-03, 2.1180e-01, -1.0094e-01,
-1.5103e-02, -4.9074e-02, -1.7702e-02,
7.2119e-02, 3.3199e-02, -9.7082e-04,
5.5383e-02, 1.0343e-01, 2.5156e-02,
2.9049e-40, -1.6397e-40, -8.8848e-41,
-6.2827e-40, 8.1281e-41, 5.2909e-40,
-4.1132e-40, 1.5751e-40, 1.5400e-40,
-7.3765e-02, -4.9723e-02, 4.9357e-02,
-2.4207e-02, -1.0291e-01, -1.4001e-03,
-1.2751e-02, 4.2805e-03, 1.8934e-03,
2.6862e-02, 1.1634e-01, 4.5666e-02,
-4.7351e-03, -4.1593e-01, 3.6082e-02,
1.1446e-02, -5.2026e-03, 1.8672e-02,
-7.0960e-04, -6.7877e-03, 9.6674e-03,
-4.9952e-03, 8.8664e-02, -2.7707e-02,
8.5309e-02, 5.5513e-02, -7.6230e-02,
3.6354e-02, 9.7794e-02, 1.1687e-02,
2.6847e-02, 3.2565e-01, -8.7710e-03,
-2.0372e-02, -1.9090e-02, -3.2566e-03,
-5.5592e-40, 7.4408e-41, 3.5576e-40,
2.7758e-40, 4.5458e-41, -6.2347e-40,
9.9739e-41, -1.6078e-40, -5.2900e-40,
1.1500e-02, -3.0675e-01, -3.0079e-02,
1.5080e-02, -2.4292e-01, 1.2736e-01,
-1.9513e-02, -1.9376e-02, -8.5960e-02,
-1.0241e-01, -2.1312e-02, -3.1999e-02,
-6.3598e-02, 1.5187e-01, 1.2279e-01,
1.5695e-03, 1.1376e-01, 5.2648e-03,
2.6415e-40, 3.0508e-40, 3.6407e-41,
-1.4403e-40, 2.8942e-40, -1.0089e-40,
2.2362e-41, 1.9843e-40, -1.5509e-40,
1.3269e-01, -3.1031e-01, -4.4091e-02,
4.6385e-03, 2.1411e-02, 5.7141e-02,
2.0724e-02, -3.5406e-02, 2.5717e-03,
-5.5922e-02, 7.1404e-01, -2.9852e-02,
1.3041e-02, 3.9373e-02, -2.4515e-01,
4.4278e-03, 2.1557e-02, -8.4940e-03,
1.3677e-02, -3.5183e-02, 1.2391e-02,
-9.2405e-02, 2.9650e-01, 6.9695e-02,
-3.3125e-02, 3.4700e-01, 1.4552e-01,
2.7357e-02, 5.2133e-01, -5.7571e-02,
2.7580e-02, 1.0381e-01, 1.3678e-02,
4.9260e-03, -4.4419e-02, 7.0651e-04,
2.9472e-40, -5.2892e-40, -3.6567e-40,
4.9403e-40, -6.2132e-40, -6.2920e-40,
-1.5156e-40, -3.6134e-40, 5.2432e-40,
-5.0427e-03, -2.8247e-03, -5.3734e-02,
-1.5918e-02, 1.8325e-01, -1.7834e-01,
-5.1774e-03, 8.0009e-02, 5.6296e-03,
3.1480e-02, 2.0665e-02, 2.7806e-04,
7.3085e-02, 7.7660e-01, 1.1979e-01,
1.9979e-02, 1.6629e-01, 2.3216e-02,
-5.9701e-40, 9.5583e-41, 1.8231e-40,
-3.3216e-40, -4.1253e-40, -3.3326e-40,
1.7131e-40, 2.9588e-40, -2.2520e-40,
-1.3337e-01, -4.2777e-01, -1.3569e-01,
2.9915e-02, -2.7016e-01, -3.7454e-03,
-1.3574e-02, -3.6298e-02, -1.6571e-02,
4.2530e-02, -4.2299e-02, 1.4320e-01,
1.4371e-02, -1.1289e-01, -3.8829e-02,
5.1689e-03, 1.5804e-02, 1.6125e-03,
-3.4601e-03, -7.2087e-03, -5.5514e-04,
4.4568e-02, 1.3621e-01, -4.3811e-02,
1.1350e-02, -2.8417e-01, 3.1553e-02,
-7.8854e-02, -2.0316e-01, 7.7746e-03,
-1.1437e-02, 2.1557e-01, -1.9479e-02,
-1.3511e-02, -2.0339e-02, -1.0276e-02,
-8.8977e-41, 5.9533e-40, -3.1413e-40,
-3.1892e-40, 5.5204e-40, -5.0634e-40,
-2.4932e-41, 4.3474e-41, 6.2961e-40,
4.7864e-03, 5.7125e-02, -1.5468e-02,
-3.9614e-03, -2.9042e-02, 2.8347e-01,
-1.0133e-02, 8.2745e-02, -1.0450e-01,
5.9537e-03, 1.4050e-02, 1.9802e-04,
2.4964e-02, 1.3077e-01, -4.7314e-02,
6.2744e-03, -1.9068e-01, 5.2593e-02,
-2.0550e-40, -2.4231e-40, 3.3927e-40,
-3.9609e-41, 2.2262e-40, 1.8866e-40,
2.0788e-40, -1.8012e-40, -1.9375e-40,
-4.7530e-03, -1.2315e-01, 8.2373e-03,
-9.2412e-02, 1.7156e-01, 1.1176e-02,
-1.4081e-02, 1.4694e-02, -1.9475e-02,
-1.5269e-02, -3.8430e-02, -7.4717e-02,
3.3361e-02, -1.1956e-01, 4.2304e-01,
-2.9924e-03, -3.3035e-02, -3.6560e-02,
-1.2386e-02, 6.3762e-03, -3.7047e-02,
1.3839e-02, -3.6358e-02, 4.3609e-02,
-8.3692e-03, 4.5794e-01, -3.0761e-01,
2.2287e-02, 2.5360e-02, -6.1253e-03,
-1.8992e-02, -4.0078e-01, 7.3821e-02,
5.6517e-03, 4.2348e-02, -2.5642e-02,
5.5659e-40, -6.1219e-40, 4.1493e-40,
5.7719e-42, -3.7181e-40, -3.3260e-40,
-4.8241e-41, 5.2207e-40, -1.2199e-40,
-1.2074e-02, 1.7647e-01, 1.1882e-02,
6.4764e-03, -2.3742e-01, -1.8033e-01,
2.5866e-02, 6.5985e-02, 3.7191e-02,
5.1047e-02, -3.0457e-02, 1.2531e-02,
-1.3252e-01, 1.2593e-01, -6.3717e-02,
4.0794e-02, -1.4786e-02, 1.7139e-02,
2.4343e-40, -1.7451e-40, 2.0169e-40,
-5.5166e-40, 2.4201e-40, -2.5701e-40,
2.9947e-40, 2.9321e-40, -1.6015e-40,
-3.6598e-02, -1.8520e-03, -1.6999e-01,
-8.6806e-02, -7.7266e-02, -9.6042e-02,
-2.1342e-02, 2.5793e-02, -7.2541e-03,
3.0667e-02, -2.6287e-01, 3.0592e-02,
-4.5559e-02, -1.4716e-01, 2.0932e-01,
-5.8472e-03, -1.0023e-02, 1.2134e-02,
-1.3284e-02, 2.0538e-02, -5.4476e-04,
5.8096e-02, -1.4790e-02, -2.0158e-02,
-3.9654e-02, -2.2069e-01, -1.5089e-01,
-1.8966e-01, -1.6834e-01, 9.8934e-02,
8.2326e-02, 7.5585e-02, -1.7188e-02,
-1.4985e-02, 2.1823e-02, -7.7015e-03,
1.8353e-40, 4.8298e-40, -2.0568e-40,
-3.7196e-40, -5.7237e-40, 1.0648e-40,
9.4960e-41, 3.0411e-40, 1.3294e-40,
-1.4884e-02, 4.9767e-02, -3.0288e-02,
8.9874e-03, -1.0290e-01, 3.1344e-01,
5.9735e-03, -2.0813e-01, -6.6145e-03,
1.6592e-02, 3.0529e-05, -1.0180e-02,
-4.8683e-02, 1.4025e-01, 2.9237e-02,
-2.3334e-02, -9.6638e-02, -1.0268e-02,
-4.9497e-41, -5.6377e-40, -2.0142e-40,
2.1230e-40, 1.6067e-40, 3.4830e-40,
-4.9031e-40, -3.0290e-40, -2.9060e-40,
3.4053e-02, -8.9560e-02, -4.4479e-02,
4.2128e-02, 6.9253e-02, -7.1096e-03,
4.2358e-02, -1.7215e-02, 9.0389e-03,
1.8129e-02, -1.4785e-01, 1.1267e-01,
-7.1637e-02, 5.5595e-01, -1.0569e-02,
1.8481e-02, -4.7556e-02, -1.1185e-02,
-1.1766e-02, -8.5959e-03, -3.0046e-02,
-2.1081e-03, 1.1518e-01, -8.4419e-02,
-7.5829e-02, 1.8199e-01, -9.7726e-03,
3.6473e-02, 1.8761e-01, 4.9495e-03,
-6.9640e-02, -2.8775e-01, 3.6149e-02,
9.6345e-04, 1.3967e-02, -6.0015e-03,
2.9861e-40, 3.9190e-40, 5.3741e-40,
3.8059e-40, 4.7113e-40, 5.9498e-40,
-5.0640e-40, -4.1610e-40, 6.2009e-40,
-2.3464e-03, -7.3888e-02, 3.4701e-02,
-5.2257e-04, 3.8444e-02, -5.3735e-01,
-1.7970e-03, 9.0298e-02, 5.3151e-02,
-2.6033e-02, 1.2973e-02, 4.9147e-03,
2.3005e-02, 1.7045e-01, 2.4715e-02,
2.7981e-02, -8.4662e-02, -9.4778e-03,
5.3019e-40, -2.1800e-40, 1.5281e-40,
-1.0282e-40, 1.8040e-41, 1.3929e-40,
-5.9679e-40, -5.2958e-40, 1.4429e-40,
3.4325e-02, -1.7240e-01, -4.9645e-02,
-2.4341e-02, 5.2652e-02, -1.1188e-02,
-3.6336e-03, 4.2148e-04, 3.3086e-03,
5.5059e-03, 1.7744e-01, -2.8681e-02,
-3.4868e-03, -1.4569e-01, 1.6508e-02,
4.6766e-03, -1.7963e-02, -2.6397e-03,
4.3618e-03, -4.2793e-03, -4.7820e-04,
-4.2795e-02, 2.0070e-01, 3.8402e-02,
5.0586e-02, 2.1910e-01, -3.4381e-02,
5.7625e-02, 4.2314e-01, -1.9732e-02,
3.4811e-02, -2.3033e-01, 1.1477e-02,
-7.3744e-03, 1.9112e-02, 4.2251e-03
}
};
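// Per-layer biases for the hidden convolutions above: judging by the [8][8]
// shape, presumably one 8-float vector per 8-output-channel layer block of
// HDNL0kernelsL.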
static __device__ __constant__ const float HDNL0biasL[8][8] =
{
{
0.0272, -0.5743, -0.0333, -0.0334, 0.0082, -0.0263, -0.0048, -0.0167
}
,
{
-0.0239, -0.0385, 0.0026, 0.0288, -0.0225, 0.0082, -0.0191, -0.0185
}
,
{
-5.8305e-03, -8.6574e-02, 4.2228e-02, -4.3500e-02, -8.1892e-04, 3.3171e-03, -1.1582e-02, -4.1205e-40
}
,
{
-0.0053, 0.0053, -0.0114, -0.0127, -0.0039, -0.0426, 0.0053, -0.0017
}
,
{
-0.0046, -0.0104, -0.0087, -0.0040, 0.1077, 0.0347, -0.0165, 0.7296
}
,
{
8.7612e-02, 5.9126e-01, 4.6709e-03, -1.1559e-39, 2.3381e-02, -1.2136e-40, -5.6040e-39, 3.7100e-02
}
,
{
-3.3246e-39, -1.4536e-02, -6.3362e-02, 8.5347e-41, 7.9956e-02, 3.0679e-04, -1.0257e-02, -1.2037e-02
}
,
{
-0.0006, 0.0117, 0.0083, 0.0686, -0.0046, 0.0015, -0.0076, 0.0079
}
};
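// Final 1x1 ("L10") kernel of the HDNL0 model: 4 * 8 = 32 weights mapping the
// 8 feature channels to 4 outputs -- presumably the 2x2 sub-pixel tile that
// doubles the image resolution.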
static __device__ __constant__ const float HDNL0kernelsL10[4 * 8] =
{
0.4908, -0.0457,
-0.1716, -0.2115,
-0.0015, -0.3152,
0.3045, 0.0330,
-0.2981, 0.0912,
0.0122, 0.2281,
0.3331, 0.2853,
0.2210, 0.2611,
0.2364, 0.0792,
0.2885, -0.7122,
-0.3715, 0.1404,
-0.0260, 0.2144,
0.2378, 0.1570,
-0.5734, 0.2077,
-0.0851, 0.2771,
0.0415, -0.1858
};
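// A minimal sketch (an assumption, not part of the original kernels) of how a
// 4 * 8 block like HDNL0kernelsL10 could be consumed: each of the 4 sub-pixel
// outputs is a dot product over the 8 feature channels. The [channel][output]
// weight layout and the clamp to [0,1] are assumptions; the real kernel may
// order or post-process differently.
static __device__ __forceinline__ void HDNL0OutTileSketch(
    const float feat[8], // 8 feature-map values at one source pixel
    float out[4])        // 2x2 sub-pixel tile (assumed meaning)
{
    for (int o = 0; o < 4; ++o)
    {
        float acc = 0.0f;
        for (int c = 0; c < 8; ++c)
            acc += HDNL0kernelsL10[c * 4 + o] * feat[c];
        out[o] = __saturatef(acc); // clamp to [0,1]; assumed
    }
}
// First 3x3 convolution of the HDNL1 model: 9 weights for each of the 8
// output channels, applied to the single-channel (presumably luma) input.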
static __device__ __constant__ const float HDNL1kernelsL1[9 * 8] =
{
-6.6326e-02, -2.2316e-01, 4.2471e-02,
1.7064e-02, -6.8305e-01, -1.5978e-01,
6.7568e-01, 3.2212e-01, 8.3561e-02,
-4.6649e-01, -6.8789e-02, 5.3455e-01,
-5.0941e-01, 7.0657e-02, 4.5647e-01,
-2.3657e-02, 3.5302e-02, -1.8316e-02,
-2.0316e-01, 4.7021e-02, -2.2313e-01,
5.3465e-02, 7.0750e-01, 9.1366e-02,
-2.8566e-01, -2.0521e-02, -7.1786e-02,
4.8186e-02, -9.3429e-02, 2.4493e-03,
3.4654e-01, 7.2625e-02, 1.6615e-01,
3.2101e-01, 3.2923e-01, -9.8548e-02,
1.1916e-02, 2.0413e-01, -1.8920e-02,
6.0858e-02, 8.3548e-01, 1.4060e-01,
-9.1827e-01, -2.4551e-01, -4.6118e-02,
-5.2737e-02, 4.3151e-01, 1.7027e-01,
2.6647e-01, 5.5240e-01, 3.4745e-03,
5.3495e-02, -4.7059e-02, -2.6593e-02,
1.5691e-01, 4.7332e-01, 2.6651e-03,
1.7997e-02, 4.1367e-01, 1.3239e-02,
4.6932e-02, 1.0278e-01, 1.0699e-02,
-3.4319e-02, -7.6373e-01, -9.7022e-02,
-1.4160e-01, 2.9567e-01, 6.6220e-01,
7.3508e-05, 1.2683e-01, -6.3442e-02
};
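// Bias terms for the eight output channels of HDNL1kernelsL1.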
static __device__ __constant__ const float HDNL1biasL1[8] =
{
-0.0264, -0.0229, -0.3021, -0.2579, -0.0327, -0.0053, -0.7777, 0.0232
};
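// Eight hidden layers of the HDNL1 model, each 9 * 8 * 8 weights: a 3x3
// convolution taking 8 input channels to 8 output channels. Below is a
// minimal sketch of evaluating one output channel; the tap-major weight
// layout and the ReLU activation are assumptions about how these constants
// are indexed, not a statement of the original kernel's method.
static __device__ __forceinline__ float HDNL1HiddenConvSketch(
    const float* __restrict__ w, // 9 * 8 weights of one output channel
    float bias,                  // matching entry of the layer's bias vector
    const float in[3][3][8])     // 3x3 window of the previous feature maps
{
    float acc = bias;
    for (int k = 0; k < 9; ++k)     // 3x3 spatial taps
        for (int c = 0; c < 8; ++c) // input channels
            acc += w[k * 8 + c] * in[k / 3][k % 3][c];
    return fmaxf(acc, 0.0f);        // ReLU (assumed)
}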
static __device__ __constant__ const float HDNL1kernelsL[8][9 * 8 * 8] =
{
{
-7.8588e-41, -5.0770e-40, -2.3334e-40,
5.7174e-40, 6.9060e-41, 2.2264e-40,
-4.1631e-40, 4.5667e-40, -1.8115e-40,
-3.1000e-40, 3.1019e-40, 5.5423e-40,
-5.8518e-40, 2.1290e-40, -5.4579e-40,
-3.7753e-40, 3.6029e-40, -1.7875e-40,
4.2296e-40, 6.5672e-41, 1.4976e-40,
-3.1479e-40, -3.2881e-40, -5.9818e-40,
3.2053e-40, 3.0821e-40, 5.1321e-40,
-2.6557e-17, -3.8205e-17, -3.7077e-17,
-2.5168e-17, -3.4817e-17, -3.4186e-17,
-1.8056e-17, -2.3105e-17, -2.2581e-17,
5.9355e-40, 2.4052e-40, -1.0027e-40,
2.2060e-40, 3.4864e-40, -5.7403e-40,
4.6936e-40, -3.3951e-40, -4.7715e-40,
-9.7917e-11, -1.0331e-10, -9.6141e-11,
-1.0581e-10, -1.1173e-10, -1.0317e-10,
-1.0192e-10, -1.0681e-10, -9.8738e-11,
-1.0402e-29, -2.3233e-29, -1.7882e-29,
-1.4804e-29, -3.7821e-29, -3.0750e-29,
-1.0448e-29, -2.6740e-29, -2.1676e-29,
4.2124e-40, 2.5024e-40, 4.5312e-40,
-2.4880e-40, 2.9838e-41, -2.7215e-41,
-2.6347e-40, 1.5950e-40, 9.3734e-41,
-1.4936e-01, -1.0438e-01, 2.9827e-02,
1.4751e-02, -1.6854e-01, -8.8101e-02,
4.9228e-02, -3.0744e-02, -1.1512e-01,
-3.4996e-02, -2.5024e-02, -1.8880e-02,
3.0008e-02, 4.8689e-02, -1.3415e-01,
-9.1698e-03, -1.1019e-02, -5.0655e-02,
-6.6579e-02, -2.6447e-02, 1.9791e-02,
-4.1727e-02, 3.6433e-02, 3.1516e-02,
-5.7619e-02, 2.3401e-02, 3.0785e-02,
-3.3610e-02, 1.2263e-01, 2.4351e-02,
1.7148e-02, 1.7144e-01, 4.0305e-02,
8.7902e-03, -7.0077e-02, -1.0688e-01,
4.7460e-02, -1.4093e-03, -1.5911e-02,
-2.2978e-02, 9.9025e-02, 1.2867e-02,
3.4704e-02, 1.4672e-01, 7.9188e-02,
-4.4222e-02, -3.9480e-02, -1.9193e-01,
-3.1897e-02, 1.0776e-01, -5.2742e-02,
8.0377e-02, 2.5764e-01, -9.7330e-02,
-1.1593e-01, -5.3753e-02, -2.8918e-02,
6.7939e-02, 2.3963e-01, 2.0856e-01,
2.7964e-02, 2.7781e-01, 2.1859e-01,
-1.5196e-02, 9.6704e-03, -8.0136e-02,
8.9441e-02, 1.0314e-01, -2.0204e-02,
-3.3970e-02, -1.4562e-02, 3.4723e-02,
2.3357e-40, -1.4361e-40, 2.0498e-40,
-5.2355e-40, -6.0151e-40, -2.9264e-40,
1.9715e-41, 5.9793e-41, -1.3675e-40,
5.3771e-40, 6.5637e-41, -3.8471e-40,
-3.0820e-40, -1.7004e-40, -1.9371e-40,
-5.1159e-40, 7.3244e-41, 3.5861e-41,
2.8441e-40, 4.5248e-41, 1.9771e-40,
-2.4681e-40, 3.6054e-40, 3.3496e-40,
-6.5048e-42, -1.6001e-40, 4.8243e-41,
-1.0165e-08, -9.9140e-09, -9.6054e-09,
-1.0511e-08, -1.0256e-08, -9.9066e-09,
-1.0521e-08, -1.0320e-08, -9.9896e-09,
2.6042e-40, 4.2016e-40, 5.3537e-40,
1.4594e-40, 1.1344e-40, 3.5144e-40,
-2.5736e-37, -1.3591e-39, 2.1029e-40,
-3.1420e-07, -3.0309e-07, -2.9630e-07,
-3.1196e-07, -2.9967e-07, -2.9249e-07,
-3.1296e-07, -3.0086e-07, -2.9332e-07,
-6.1256e-12, -5.9283e-12, -5.6508e-12,
-6.5297e-12, -6.4118e-12, -6.0667e-12,
-6.8382e-12, -6.8547e-12, -6.5225e-12,
-5.0327e-26, -1.0795e-25, -1.8952e-25,
-2.4220e-26, -5.9067e-26, -1.1323e-25,
-2.1499e-27, -5.5342e-27, -1.0333e-26,
4.5039e-03, -1.3303e-02, 1.6183e-01,
6.5951e-02, -7.1353e-02, 1.7254e-01,
-1.8671e-03, 1.0593e-01, -3.6872e-02,
4.9102e-02, -2.4075e-03, 4.8194e-02,
-7.0892e-02, -1.8948e-01, -1.6586e-01,
-2.8102e-02, 2.0870e-02, 5.9228e-02,
1.2673e-02, 3.3908e-02, 4.8282e-02,
4.4369e-02, 5.6304e-02, 1.2225e-02,
4.1855e-02, 1.1990e-01, 6.3799e-02,
-7.3884e-02, 1.4153e-02, 9.5825e-02,
4.2850e-02, -3.5337e-02, 1.3615e-01,
-2.0900e-01, -2.2835e-02, -8.6987e-02,
-6.7793e-02, 1.3547e-01, -9.9666e-02,
3.5498e-02, 5.3725e-02, 1.1501e-01,
-1.2238e-01, 3.5354e-02, 7.4216e-02,
-3.5288e-02, 7.0111e-03, 2.4820e-02,
-1.0649e-02, 1.6715e-01, 1.2825e-01,
3.1145e-02, 1.2097e-01, -1.2073e-02,
-7.0603e-02, 5.5574e-02, -5.0025e-02,
-8.2885e-02, 1.0957e-01, 1.3311e-01,
2.9147e-02, -1.1849e-02, 8.9953e-02,
-3.2247e-02, -1.0747e-02, 9.1431e-03,
1.2114e-01, -5.9780e-02, 5.4821e-02,
-5.2592e-02, -6.9082e-02, -7.5981e-02,
-7.8533e-02, 1.3658e-01, 1.0923e-01,
-3.2530e-02, -2.1342e-01, -1.2200e-01,
-1.9196e-02, 1.0450e-01, -8.9044e-02,
-2.0110e-02, 6.1439e-02, -2.7405e-02,
6.0823e-02, -6.4268e-03, -9.1778e-03,
6.4877e-02, -6.1227e-02, -5.4466e-02,
9.6375e-02, 1.7519e-01, 5.0725e-03,
1.9159e-01, 3.9725e-01, 1.2851e-01,
-6.9197e-02, 4.9372e-02, -3.4221e-02,
1.1583e-01, 1.3389e-01, 2.9135e-01,
1.0290e-02, 1.1214e-01, 1.7560e-01,
-1.8048e-02, 8.4782e-02, 4.9925e-02,
-3.8447e-02, -1.3156e-01, -1.1072e-01,
1.8256e-01, 2.2831e-01, -1.6508e-01,
4.6781e-02, 1.4913e-01, -8.6956e-02,
5.1365e-04, 6.7873e-02, -3.4787e-03,
1.7689e-01, 1.8414e-01, 2.2286e-01,
1.2571e-01, 1.7687e-01, 1.5949e-01,
5.9904e-02, 1.6259e-01, 1.4313e-01,
2.2234e-01, 4.0943e-01, 3.1469e-01,
1.9799e-01, 4.3052e-01, 3.0510e-01,
1.2259e-01, -1.0778e-02, 6.2284e-03,
1.4508e-02, -6.9073e-02, 5.0998e-02,
5.2962e-02, -1.5291e-01, -1.0491e-02,
-8.6903e-02, -1.0430e-01, 3.0130e-02,
4.1691e-02, -1.2675e-01, -5.5169e-01,
8.9644e-02, 3.6910e-02, -1.5459e-01,
5.3656e-03, 6.7936e-02, 1.0793e-01,
-2.7424e-02, -1.7652e-01, -3.5776e-01,
2.4593e-02, -5.6237e-01, -5.9038e-01,
-9.4807e-02, -7.5681e-02, -3.6990e-02,
8.7385e-03, -5.7989e-02, -4.9573e-02,
-7.7422e-02, -1.1899e-01, -7.4023e-02,
9.1539e-03, -1.1760e-01, 4.6825e-02,
1.9901e-02, -3.9718e-02, 1.2997e-02,
4.2209e-02, -5.2119e-02, -1.2255e-01,
2.4262e-02, 5.3676e-02, -2.4767e-01,
-4.2933e-02, -2.2473e-01, -4.0310e-01,
-3.5160e-02, 1.9858e-01, -1.5943e-01,
1.3208e-01, -1.0493e-01, -6.7076e-02,
-2.5244e-01, 1.1175e-02, 2.5568e-01,
-3.3867e-01, 3.1953e-02, 5.9426e-01,
4.0551e-02, 4.4914e-03, -1.9348e-02,
-6.7386e-02, -1.5543e-01, -3.0883e-02,
8.9177e-02, -4.6432e-02, 6.8227e-02,
8.7784e-02, 3.6127e-02, -2.0375e-02,
4.5461e-02, -4.9071e-02, 9.9435e-02,
-2.5700e-01, -2.7706e-01, 6.2776e-02,
-6.9571e-02, -5.7888e-03, 9.3852e-02,
2.8490e-02, -2.7854e-01, 1.4209e-01,
1.5373e-02, -4.3503e-02, 9.6895e-02,
1.1682e-02, 1.5608e-01, 1.5844e-01,
5.8027e-02, 2.6632e-02, -8.5479e-03,
1.2836e-01, 2.0714e-01, 1.0228e-01,
1.4647e-02, 5.7609e-02, -1.6728e-02,
2.1212e-01, 3.2673e-01, 4.5670e-02,
-6.0844e-02, -1.1768e-01, -1.1233e-01,
5.0123e-04, 6.3947e-02, -1.8356e-01,
1.4091e-01, -2.1568e-02, 8.5933e-02,
-3.9406e-02, 8.2921e-02, -1.0601e-01,
4.1284e-02, -7.3138e-02, 1.7264e-01,
2.5883e-02, 5.2945e-01, 2.4510e-01,
2.7291e-03, 4.0173e-02, 7.8221e-03,
-3.5795e-02, -4.8631e-03, -2.2715e-01,
1.2330e-01, 7.1739e-01, -4.1725e-01,
7.5106e-02, 2.5267e-02, -2.8655e-01,
-7.8731e-02, -7.5747e-03, -5.5601e-02,
7.9764e-02, 1.0524e-01, 8.6742e-03,
2.1791e-02, 3.7304e-02, -1.1534e-01,
-1.2011e-01, -7.5160e-02, 1.3737e-02,
-2.9470e-01, 2.6613e-01, -2.3740e-02,
1.2957e-01, 1.4752e-01, -9.3655e-02,
2.9828e-02, 2.0664e-01, 1.9731e-02,
-8.0378e-02, -3.9481e-01, -1.5395e-01,
-5.7944e-02, -8.6343e-02, -5.4324e-02,
7.1664e-02, 1.5294e-01, -1.2112e-02,
2.1023e-02, 1.1945e-01, -7.2998e-02,
-1.1693e-02, -1.8818e-01, -9.8693e-02,
-6.7017e-02, 6.9767e-02, -5.0268e-02,
-9.1106e-03, 2.4267e-01, 6.0277e-02,
3.5269e-02, 7.7376e-02, 1.6642e-02,
-5.2600e-02, -1.8864e-01, -1.1195e-01,
3.2119e-01, -9.7913e-02, 1.4734e-01,
8.6988e-02, -5.3563e-03, -2.6136e-03,
-9.1528e-03, 2.8186e-01, -1.5933e-01,
4.8499e-02, 4.5189e-01, -1.6399e-01,
5.8164e-02, 6.3251e-02, -2.8738e-02,
2.0424e-01, -7.2819e-02, 2.1903e-02,
-3.5630e-01, 1.3171e-01, -7.6749e-02,
3.8848e-02, 1.7902e-01, -1.1902e-01,
-4.4221e-02, 1.5032e-02, 2.9078e-02,
-1.9738e-01, -1.4878e-02, 1.3315e-02,
1.3956e-02, 1.2856e-01, 7.0688e-02,
2.0933e-01, 1.7286e-01, 6.7601e-02,
5.5136e-01, 4.6866e-01, 1.8402e-01,
2.2362e-01, 2.4124e-01, 1.3167e-01
}
,
{
-5.2308e-12, -5.4024e-12, -5.0039e-12,
-5.4553e-12, -5.6928e-12, -5.2812e-12,
-5.0230e-12, -5.2150e-12, -4.9133e-12,
5.7994e-02, 1.0051e-01, -1.0618e-01,
6.8090e-02, 1.2789e-01, 1.1380e-01,
-1.5882e-01, 8.2323e-03, -9.1424e-02,
2.0132e-07, 2.0907e-07, 2.1344e-07,
2.1179e-07, 2.2018e-07, 2.2381e-07,
2.1095e-07, 2.1920e-07, 2.2150e-07,
2.9336e-02, 5.4427e-02, -1.2082e-01,
5.8399e-02, 2.2261e-01, 1.1165e-01,
-9.6098e-02, 8.3175e-02, -6.5909e-02,
1.2007e-01, 1.9776e-01, 7.7464e-02,
6.7018e-02, 3.6536e-01, 1.3796e-01,
6.0724e-02, 4.6161e-02, 2.3740e-01,
-2.1117e-02, -2.0200e-02, 9.3703e-02,
-4.6932e-02, -1.5910e-01, 8.8094e-02,
-5.6641e-02, -1.7146e-01, -1.0502e-01,
-2.5624e-01, 1.6049e-01, -3.3267e-02,
-2.3248e-01, 5.4036e-01, 1.0027e-01,
-2.1680e-01, -7.0096e-03, -1.0692e-01,
-4.8357e-02, 2.5107e-01, 4.8323e-02,
9.7245e-02, 5.5015e-01, -3.4641e-01,
1.2458e-02, -1.3626e-01, -4.1992e-01,
-2.1359e-40, -1.4250e-40, -4.7123e-40,
-5.9433e-41, 1.9903e-41, -1.7701e-40,
-5.9941e-40, -5.8562e-40, -5.0226e-40,
-2.6581e-40, 1.3006e-40, -1.4201e-40,
5.4264e-40, 2.3848e-40, 5.6412e-40,
-2.6378e-41, -5.7132e-40, -4.1343e-40,
-3.2848e-22, -3.6697e-22, -3.4147e-22,
-3.5780e-22, -3.9435e-22, -3.5989e-22,
-3.1212e-22, -3.4305e-22, -3.0670e-22,
-1.1749e-08, -1.1602e-08, -1.1494e-08,
-1.2125e-08, -1.1918e-08, -1.1718e-08,
-1.1779e-08, -1.1623e-08, -1.1559e-08,
-5.0237e-07, -4.9179e-07, -4.6744e-07,
-5.1967e-07, -5.0826e-07, -4.8421e-07,
-5.0226e-07, -4.9668e-07, -4.8019e-07,
5.6433e-41, -3.0514e-40, -5.4526e-40,
1.1125e-41, 2.9485e-40, 5.5282e-40,
3.0229e-40, 1.5915e-40, 5.3759e-40,
-6.1144e-27, -9.2380e-26, -2.4302e-25,
-9.3834e-25, -1.0289e-23, -1.9513e-23,
-4.3746e-24, -4.4359e-23, -7.0505e-23,
-8.1604e-36, -3.2928e-37, -2.2994e-40,
-3.9543e-37, -9.9513e-39, 7.4616e-41,
-4.0044e-39, 4.4392e-40, 4.8856e-40,
-3.3447e-40, -3.9935e-40, 2.4649e-40,
2.0207e-40, -3.0245e-40, -7.1986e-41,
6.2938e-40, -3.6922e-40, 1.5296e-40,
-6.4982e-41, 5.0849e-41, 5.7873e-40,
1.4327e-40, -4.2163e-40, 1.3807e-40,
2.8569e-40, 1.9139e-40, 3.2985e-40,
-5.4410e-40, 2.3070e-40, 2.1690e-40,
-1.5964e-40, -2.2781e-40, 5.6766e-40,
2.2533e-42, -2.5532e-40, -5.5822e-40,
5.7249e-40, 5.3555e-40, -4.9107e-41,
1.7538e-40, -1.2312e-40, 5.0077e-40,
6.1500e-40, 1.9980e-40, 6.2953e-40,
-7.5314e-23, -9.4299e-23, -7.1342e-23,
-8.5139e-23, -1.1237e-22, -9.0478e-23,
-6.2038e-23, -8.5180e-23, -7.3015e-23,
5.0613e-40, 1.5224e-40, -1.8977e-40,
2.4108e-41, -5.1771e-40, 6.2317e-40,
1.0465e-40, 2.8816e-41, 6.2500e-40,
3.5727e-40, 4.2717e-40, -3.5900e-40,
-4.4831e-40, 3.4260e-40, -4.8293e-40,
-2.4133e-40, 3.1140e-40, -2.0777e-40,
-2.2906e-41, 3.5923e-40, -4.4443e-40,
-4.6615e-40, -2.1123e-40, 4.5700e-40,
-4.6360e-40, -3.6052e-40, -3.4319e-40,
-3.6575e-40, -3.5707e-40, -3.0530e-41,
4.2531e-40, -1.2255e-40, -3.9607e-40,
3.5903e-40, -5.4630e-40, -3.1460e-40,
2.8820e-40, 4.9460e-40, 6.1461e-40,
8.9118e-41, -4.6579e-40, -2.4172e-40,
-5.5474e-40, -8.1848e-41, -1.6910e-40,
-1.6272e-25, -1.8802e-25, -1.7229e-25,
-1.7850e-25, -2.0338e-25, -1.8235e-25,
-1.4715e-25, -1.6733e-25, -1.4681e-25,
-5.5471e-09, -5.6862e-09, -5.7043e-09,
-5.8727e-09, -5.9823e-09, -5.8983e-09,
-5.8040e-09, -5.8670e-09, -5.7388e-09,
-9.7253e-07, -9.7248e-07, -9.4623e-07,
-1.0149e-06, -1.0042e-06, -9.6709e-07,
-1.0139e-06, -9.9930e-07, -9.5295e-07,
-4.5042e-40, 2.6725e-40, 2.3181e-40,
-4.6274e-41, -1.1799e-40, 5.0685e-40,
-1.0765e-40, 3.3322e-40, -6.1905e-40,
-1.3653e-34, -3.4690e-33, -1.1578e-32,
-1.4444e-31, -2.1995e-30, -4.8668e-30,
-1.2965e-30, -2.0189e-29, -3.3962e-29,
-2.5057e-40, 7.2876e-41, 4.5731e-41,
-1.6525e-40, 5.0987e-40, -5.4683e-40,
8.1836e-41, 6.2722e-40, -3.1057e-40,
4.0987e-40, 3.5941e-40, 5.1680e-40,
5.5563e-40, 3.1011e-40, 4.7068e-40,
1.0426e-40, -1.0803e-40, 4.4867e-40,
-4.9675e-03, 1.5412e-01, -4.1930e-03,
-6.1089e-02, 2.0405e-01, 1.9587e-01,
3.8772e-02, 1.6894e-01, -2.6163e-02,
1.0839e-30, 1.8608e-30, 1.1386e-30,
1.4863e-29, 1.9422e-29, 1.1639e-29,
1.7504e-29, 2.2177e-29, 1.3629e-29,
6.4484e-02, 6.6296e-02, 2.2838e-01,
-1.0213e-01, 7.5883e-02, -1.7531e-01,
-1.4869e-01, 1.0736e-01, 1.4129e-01,
-2.8235e-02, -2.9232e-02, -9.3912e-02,
5.1317e-02, 9.0256e-02, -2.4669e-02,
-3.2465e-02, 5.8099e-02, 9.8402e-02,
-2.3135e-01, -1.3786e-01, 2.8581e-01,
-3.2410e-01, -2.6623e-01, 6.1583e-02,
1.8696e-01, 4.7251e-02, -2.3520e-01,
2.5630e-02, -1.2358e-01, -1.5735e-01,
-1.2198e-01, 5.1970e-01, 1.9976e-01,
-1.2515e-01, 9.8768e-02, 5.8917e-02,
-3.8569e-02, -9.2729e-02, -1.8982e-01,
1.1378e-01, 5.7195e-01, -1.8265e-01,
-3.5724e-02, -2.1379e-01, -2.2129e-01,
-5.1198e-40, -3.4709e-40, 6.2940e-40,
-2.2134e-41, -3.6133e-40, -2.7075e-40,
-5.9664e-40, -2.3937e-40, 3.0876e-40,
9.1814e-41, 9.5898e-41, -3.1892e-40,
3.1093e-40, 2.7935e-40, 1.7966e-40,
-2.3967e-40, 4.0806e-40, 6.2012e-40,
5.3771e-41, 6.1000e-40, -4.6695e-40,
5.9474e-41, -4.9675e-40, 5.7403e-41,
4.7091e-40, -5.0751e-41, 3.9864e-41,
-9.7756e-41, 2.7978e-40, -5.0791e-40,
-3.4321e-40, -7.0774e-41, -5.2651e-40,
2.8034e-40, -3.3452e-40, 1.9535e-40,
-6.2300e-40, -1.8372e-40, -1.9038e-40,
-5.6564e-40, -6.1257e-40, -1.0338e-40,
-1.7191e-41, -1.2843e-41, 5.0707e-40,
-4.4587e-40, 2.7128e-40, -1.4155e-40,
-5.7475e-40, -3.4612e-40, -4.7424e-40,
1.7235e-40, -6.0028e-40, -1.6342e-40,
-5.1072e-40, -2.4721e-40, -2.8477e-41,
2.6598e-40, -4.4078e-40, 4.1763e-40,
-3.3947e-40, -5.5626e-40, 4.9713e-40,
2.1733e-40, -2.9024e-40, -4.5514e-42,
-3.4873e-40, -1.0737e-40, -1.4297e-40,
2.8514e-40, 2.6283e-40, 2.2827e-40,
3.8908e-40, -4.2140e-40, 6.1433e-40,
-4.7825e-40, -3.0140e-40, -5.9563e-40,
1.5280e-40, 2.6156e-40, 5.0361e-40,
1.9497e-01, 2.3140e-01, -3.5244e-02,
1.6876e-01, -1.7646e-02, -2.0413e-01,
9.8052e-02, -6.7906e-02, -3.9834e-02,
-5.9252e-15, -6.7431e-15, -8.1865e-15,
-5.7350e-15, -6.6893e-15, -8.9833e-15,
-8.4106e-15, -1.0631e-14, -1.5948e-14,
8.9389e-02, 6.6460e-02, 6.8477e-02,
6.1099e-03, -8.7536e-02, 1.1792e-01,
-1.0079e-01, 1.5293e-01, 4.3945e-02,
1.0168e-01, 1.0281e-01, -7.9173e-02,
2.0855e-01, 1.7537e-01, -7.1000e-02,
-1.4157e-01, -3.8478e-02, -2.7478e-01,
2.2156e-01, -6.4262e-02, -7.2841e-02,
-3.2334e-01, 6.5591e-02, 1.1163e-01,
7.2151e-02, -1.6943e-01, 5.9049e-02,
-1.4813e-01, -2.0904e-01, -8.8010e-02,
-2.7215e-01, 5.7668e-01, 1.7618e-02,
-7.1365e-02, 1.2976e-01, -1.0169e-01,
-8.9229e-02, 3.3971e-02, 1.8295e-01,
1.7204e-01, 3.8082e-01, 3.7415e-02,
5.9309e-02, -4.9550e-04, 5.1555e-01,
-5.1006e-18, -5.6038e-18, -5.8724e-18,
-5.8910e-18, -5.8379e-18, -5.6311e-18,
-5.2596e-18, -5.1835e-18, -4.6300e-18,
6.4067e-02, 1.8889e-02, -1.0634e-01,
1.7316e-04, 1.9935e-01, -1.1854e-02,
-9.3669e-02, -1.1924e-01, -1.8981e-02,
1.7465e-08, 1.7340e-08, 1.7565e-08,
1.8234e-08, 1.8008e-08, 1.8017e-08,
1.9226e-08, 1.8956e-08, 1.8651e-08,
-1.7294e-01, -1.2200e-01, -4.9577e-02,
-3.5087e-02, -1.2526e-01, 9.3445e-03,
-7.4374e-02, -1.1350e-01, 2.7510e-03,
8.5153e-02, 4.2080e-02, -5.0111e-02,
1.2845e-01, 1.9630e-01, 1.0542e-01,
-1.0095e-01, 6.2631e-02, 8.8734e-02,
3.4836e-01, 5.4389e-01, -2.2360e-01,
5.1721e-01, 5.7094e-01, -6.7491e-02,
-3.5972e-02, 1.0590e-01, -2.2984e-01,
-1.5483e-01, -5.1271e-03, 4.9780e-02,
-1.3184e-01, 2.8028e-01, -1.1427e-02,
-3.4093e-02, -6.7622e-02, -1.2359e-02,
1.3184e-02, 1.2125e-01, -1.2502e-02,
9.2730e-02, -6.5974e-02, -1.6519e-01,
1.9546e-01, -1.5188e-01, -8.1752e-02
}
,
{
-3.4905e-04, -3.5739e-04, -3.2920e-04,
-3.8506e-04, -3.9121e-04, -3.5635e-04,
-3.7303e-04, -3.7698e-04, -3.4190e-04,
2.8622e-41, -1.2033e-41, 1.2609e-40,
-4.9379e-40, -5.1047e-40, 5.5085e-41,
-4.7002e-40, -5.0136e-40, -4.5629e-40,
-5.1095e-40, 1.8741e-40, 1.8435e-40,
4.1851e-40, -8.9558e-41, -9.6681e-41,
-1.8244e-40, 2.7992e-40, 1.8116e-40,
2.8655e-40, -3.0193e-40, 2.2293e-40,
1.6805e-40, 3.3049e-40, 6.9542e-41,
-3.3329e-40, 4.2212e-40, -1.3453e-40,
-8.4502e-15, -1.1099e-14, -9.4174e-15,
-9.8778e-15, -1.1768e-14, -9.4875e-15,
-6.7805e-15, -7.4561e-15, -5.8023e-15,
6.0452e-40, 6.9262e-41, 2.9300e-40,
-6.1511e-40, -4.1269e-40, 4.4012e-40,
1.3340e-42, -2.9020e-40, -4.5529e-40,
-1.2289e-22, -1.3972e-21, -5.5694e-21,
-1.7854e-21, -1.7743e-20, -5.6749e-20,
-6.8510e-21, -6.2353e-20, -1.6203e-19,
-5.0003e-07, -5.1950e-07, -4.7654e-07,
-5.5510e-07, -5.7995e-07, -5.2753e-07,
-5.3262e-07, -5.5802e-07, -5.0971e-07,
-1.4922e-02, -1.1926e-01, -1.9067e-02,
-2.6298e-03, 2.1756e-01, 3.0148e-02,
1.4372e-01, 3.5066e-02, -1.0184e-02,
-4.1698e-12, -4.8798e-12, -6.4033e-12,
-2.3169e-12, -2.7879e-12, -3.7276e-12,
-1.6177e-12, -2.0021e-12, -2.6440e-12,
-5.9514e-40, -4.4339e-40, -3.0315e-40,
3.5756e-40, 2.5390e-40, -1.2253e-40,
2.1417e-40, 4.0569e-40, 5.3962e-40,
-5.5825e-13, -6.8528e-13, -9.3486e-13,
-2.9163e-13, -3.6959e-13, -5.1183e-13,
-1.8703e-13, -2.4740e-13, -3.4019e-13,
-2.7137e-01, -4.5025e-01, 2.6405e-02,
-7.9580e-02, 5.0698e-01, -7.8794e-02,
-3.7540e-02, -7.1115e-03, -3.9741e-01,
-5.9910e-40, -5.5101e-40, 3.1274e-41,
-6.9384e-41, -4.9294e-40, -1.0818e-40,
-3.5484e-40, -4.7965e-41, -5.2508e-41,
4.1917e-01, -1.6207e-02, -6.8506e-02,
-2.7060e-02, 5.6162e-01, 1.6696e-01,
-1.7677e-03, 1.8842e-01, -6.0493e-02,
-3.0696e-01, -1.7293e-01, -8.7143e-02,
-1.6740e-01, 1.8861e-02, -1.7112e-01,
8.6594e-02, 3.0025e-01, -7.6141e-02,
1.1317e-02, 1.0678e-01, -5.1283e-02,
-1.2872e-01, 4.2580e-01, 4.9678e-02,
-2.8372e-01, -1.3479e-01, -7.3813e-02,
-1.7038e-15, -1.1156e-15, -7.3385e-16,
-2.6350e-15, -1.6234e-15, -1.0598e-15,
-7.7860e-15, -4.6981e-15, -3.0030e-15,
-3.0246e-40, -4.1596e-40, 2.9013e-40,
8.5195e-41, -2.2396e-40, -2.0322e-40,
-5.6200e-40, 2.4820e-40, 3.1309e-40,
-3.1822e-17, -1.6585e-17, -8.8616e-18,
-5.9907e-17, -2.9812e-17, -1.6126e-17,
-2.4410e-16, -1.2541e-16, -6.7867e-17,
1.5795e-01, -1.4429e-01, -6.0501e-02,
5.9113e-02, 3.4391e-01, 1.4165e-01,
5.2564e-02, -1.8209e-01, -6.8176e-02,
-7.7363e-41, 5.9969e-40, 5.9290e-40,
-7.4888e-41, -7.0945e-41, 5.3120e-40,
1.3612e-40, -4.6718e-40, -1.0677e-40,
-1.1498e-01, -1.2925e-02, 2.6735e-02,
-8.1469e-02, 2.9678e-01, 1.8971e-01,
2.0149e-02, 2.4207e-03, -1.2549e-01,
-6.6799e-02, -3.5900e-02, -5.6111e-02,
9.5181e-02, 2.1216e-02, 2.0477e-01,
8.5923e-03, 6.8615e-03, 3.8252e-02,
4.5098e-03, 2.1321e-01, 3.4612e-03,
3.5662e-01, 4.7532e-02, 2.5319e-01,
4.1275e-02, 1.7951e-01, 3.2239e-02,
-2.6628e-21, -7.7165e-22, -4.9086e-22,
-1.4320e-21, -2.7134e-22, -1.2712e-22,
-1.9648e-21, -3.4172e-22, -1.3895e-22,
-2.2836e-40, 3.2091e-40, -4.4396e-40,
2.9048e-40, 6.0866e-40, 3.7804e-40,
-3.0676e-40, -2.4897e-40, 4.9891e-40,
-1.8955e-28, -3.4994e-29, -1.2914e-29,
-4.7737e-29, -3.5212e-30, -6.4003e-31,
-8.2908e-29, -3.1692e-30, -3.6909e-31,
-9.3327e-02, 1.5314e-01, 1.0676e-01,
2.5979e-01, -6.6826e-01, 2.3727e-01,
1.4855e-01, 1.9205e-01, 8.8246e-02,
-5.5197e-40, 5.3162e-41, -5.2933e-40,
1.0846e-41, -5.8128e-40, -3.1273e-40,
-2.8408e-40, 1.6989e-40, 4.8221e-41,
7.8403e-02, 1.6407e-01, 7.9932e-02,
3.2253e-01, -2.6036e-01, -8.9727e-02,
-7.5145e-02, 1.5536e-02, -8.2710e-02,
-2.1608e-01, -4.4619e-01, -4.4470e-02,
-3.9430e-01, -8.2373e-01, -7.0646e-01,
-6.9004e-03, -4.9697e-01, -1.4212e-01,
-1.8932e-06, -1.8356e-06, -1.6373e-06,
-1.9427e-06, -1.9113e-06, -1.7028e-06,
-1.8843e-06, -1.8616e-06, -1.6818e-06,
-4.7452e-29, -4.4894e-29, -2.5364e-29,
-5.6268e-29, -5.4363e-29, -3.0876e-29,
-4.3808e-29, -4.2767e-29, -2.4573e-29,
3.8855e-40, 3.5152e-40, -4.8707e-40,
4.3606e-41, -1.7886e-40, 5.1970e-40,
6.2864e-40, 5.9972e-40, 2.2197e-40,
-2.1903e-37, -1.9174e-37, -7.0785e-38,
-2.7149e-37, -2.4810e-37, -9.5619e-38,
-1.8463e-37, -1.7136e-37, -6.7163e-38,
-2.9062e-30, -3.1324e-30, -1.0876e-30,
-2.7434e-30, -3.7036e-30, -1.2821e-30,
-6.8828e-31, -9.8708e-31, -3.7930e-31,
-6.3329e-41, -3.8604e-41, -2.8272e-40,
-3.3350e-40, -1.5210e-40, -4.2620e-41,
-1.7669e-41, 5.2291e-40, -3.3205e-40,
-3.0738e-25, -8.2305e-24, -2.1451e-23,
-1.4470e-24, -4.5131e-23, -1.2177e-22,
-4.2841e-24, -1.3077e-22, -3.5946e-22,
-8.5637e-08, -8.4715e-08, -7.7597e-08,
-8.7326e-08, -8.7480e-08, -8.0290e-08,
-8.4525e-08, -8.4963e-08, -7.8582e-08,
-5.8581e-27, -8.8483e-27, -8.1150e-27,
-7.4336e-27, -1.2036e-26, -1.1909e-26,
-6.6006e-27, -1.0685e-26, -1.0809e-26,
-5.6355e-40, -2.3469e-40, -3.5885e-40,
-2.0755e-40, 2.0377e-40, 3.2259e-40,
-5.3947e-40, 4.2747e-41, 4.8967e-41,
4.5073e-41, 5.0069e-40, 2.6114e-40,
-4.8225e-40, -4.8317e-40, -5.4316e-40,
-5.4335e-40, -5.2994e-40, 2.6295e-40,
-1.1702e-40, -2.3137e-41, -4.5405e-40,
-4.6797e-40, 6.5582e-41, 1.8111e-40,
6.1477e-40, -1.6827e-40, -2.0288e-40,
-2.4220e-41, 4.7774e-40, 5.1050e-40,
4.9844e-40, 5.6437e-41, 4.7749e-40,
-6.8037e-41, -5.5944e-41, -5.2248e-40,
-2.9382e-40, 2.3800e-41, 1.5850e-40,
-4.5290e-40, -5.2260e-41, 2.3726e-40,
-1.9232e-40, -2.3502e-40, -2.9736e-40,
-2.8081e-40, -5.2929e-40, -4.0786e-40,
-3.0303e-41, 3.1336e-40, -5.8450e-40,
-1.5091e-40, -2.7371e-40, -4.5927e-40,
-4.0985e-38, -6.9102e-38, -5.4450e-38,
-6.2744e-38, -1.1526e-37, -9.9374e-38,
-4.8587e-38, -9.1819e-38, -8.0593e-38,
-2.9266e-29, -4.5005e-29, -3.9891e-29,
-3.8505e-29, -6.3370e-29, -6.0017e-29,
-3.2761e-29, -5.4145e-29, -5.1812e-29,
3.3692e-40, 1.0044e-40, -6.6821e-41,
9.2910e-41, 6.2137e-40, -3.5625e-40,
1.8601e-40, 3.1653e-40, -1.1506e-40,
1.2093e-40, -5.7191e-40, 5.6828e-40,
-2.3177e-40, -2.1648e-40, 5.3642e-40,
4.8826e-40, 5.2760e-40, -4.9059e-40,
-2.0721e-40, 2.0122e-40, -5.9485e-40,
3.8843e-40, -6.0861e-41, -4.0542e-40,
-3.4308e-40, -4.2822e-40, -3.9605e-40,
-5.7429e-40, 4.9242e-40, -5.9141e-40,
4.6267e-40, -2.4953e-40, -2.9300e-40,
5.3466e-40, -5.2403e-40, 3.5178e-40,
-1.8309e-40, 2.9157e-40, -7.7367e-41,
-5.8922e-40, 3.2359e-40, -6.1293e-40,
6.1138e-40, 2.2121e-40, -5.0657e-42,
4.7910e-40, -1.4080e-40, 1.9220e-40,
-3.5670e-40, 3.4204e-40, -5.0215e-40,
1.1877e-41, 2.3114e-40, -4.7794e-40,
-3.6520e-40, 4.3222e-40, -5.2866e-40,
-6.0703e-40, -4.0896e-40, -1.2521e-40,
-4.1981e-40, 5.4404e-41, 3.3337e-40,
1.3733e-01, 1.8485e-01, 7.6179e-02,
8.1719e-02, 3.3343e-01, 2.9857e-02,
-4.2753e-03, 2.0957e-01, 1.8582e-02,
2.9948e-07, 3.3403e-07, 3.7619e-07,
3.4854e-07, 3.8224e-07, 4.1507e-07,
3.7511e-07, 4.0398e-07, 4.3743e-07,
-1.7150e-41, -2.4088e-41, -1.5593e-40,
6.3817e-41, 4.8004e-41, -1.1053e-40,
-2.5225e-40, -2.7111e-40, -4.2970e-40,
1.0496e-06, 1.0916e-06, 1.1376e-06,
1.1364e-06, 1.1756e-06, 1.2051e-06,
1.1762e-06, 1.2105e-06, 1.2358e-06,
1.0037e-02, 1.4957e-01, -4.9010e-02,
2.6877e-02, 1.9067e-01, -1.9339e-03,
-2.2081e-02, -1.5137e-01, -1.6088e-01,
1.6880e-41, -2.0352e-41, -4.1857e-42,
2.0926e-40, -2.1394e-41, -5.4341e-40,
4.6824e-40, 6.2682e-40, 4.9865e-40,
-3.2967e-01, -2.5981e-01, -1.3016e-01,
-2.6507e-01, 3.2282e-01, 4.3204e-01,
-7.0936e-02, 1.9800e-01, 9.4916e-02,
-1.0122e-02, 7.4127e-02, -7.1554e-02,
7.7869e-02, 1.5734e-01, 1.3287e-01,
-9.5431e-02, 1.0984e-01, -7.6759e-02
}
,
{
-5.5262e-40, 3.7699e-40, -1.4920e-40,
4.0064e-40, -2.0632e-40, -4.4801e-41,
-3.6749e-40, 5.9043e-40, -1.5942e-40,
-5.9219e-42, -4.1286e-40, -1.6920e-40,
-2.5927e-40, -4.5458e-41, 2.0990e-40,
-4.6860e-40, 5.0483e-40, 2.8004e-40,
-4.0641e-40, 6.0770e-40, -3.8297e-42,
5.7537e-40, 5.7772e-40, -1.0048e-40,
1.5945e-40, 3.9582e-40, -2.6190e-40,
-5.1046e-40, -5.5028e-40, 5.8786e-40,
-3.5033e-40, -1.2031e-40, -3.4156e-40,
3.0058e-40, 4.3043e-40, 5.9825e-40,
4.9197e-40, 2.5974e-40, -4.3461e-41,
-4.1935e-40, -1.6383e-41, -1.4680e-40,
-5.3501e-40, -2.6348e-40, 3.0631e-40,
-5.2019e-40, -4.4123e-40, 2.3984e-40,
-4.4682e-41, -4.6000e-40, -5.0418e-40,
-4.1263e-40, 4.5391e-40, 2.8844e-40,
5.2179e-40, -1.3188e-40, 5.1600e-40,
-2.2913e-40, -3.1127e-40, 5.4478e-40,
2.3395e-41, 5.4758e-40, 2.0998e-40,
-1.9914e-10, -2.0700e-10, -1.9815e-10,
-2.1098e-10, -2.1989e-10, -2.1131e-10,
-2.0797e-10, -2.1693e-10, -2.0860e-10,
-2.1061e-40, -2.1208e-40, -3.3698e-40,
3.2370e-40, 2.9276e-40, -3.6860e-40,
3.4752e-40, -2.0660e-40, -3.8183e-40,
-8.0136e-02, 1.3809e-02, 1.6846e-03,
3.7960e-02, 8.7557e-02, -3.5498e-01,
9.8165e-03, 9.8384e-02, 1.2395e-01,
-2.8751e-02, 9.9172e-02, 5.5841e-02,
-4.0383e-02, 1.0856e-01, -5.4339e-01,
1.3245e-02, -4.7642e-02, -1.0427e-01,
-7.4696e-03, 5.0806e-02, -1.7179e-01,
5.0303e-02, -4.0322e-01, 7.4760e-01,
-9.2342e-02, 1.1958e-01, -1.8871e-01,
3.7044e-40, -4.6951e-40, -1.9873e-40,
5.3289e-41, 2.7689e-40, -4.6994e-41,
-3.1404e-40, -5.9106e-40, 6.0436e-40,
-6.0294e-40, -3.6565e-40, -1.1884e-40,
5.5933e-40, -9.5741e-41, 4.4736e-40,
4.3267e-40, -4.9583e-40, 3.4437e-40,
-1.7432e-40, 1.4518e-40, 2.1033e-40,
-3.4667e-40, 1.7222e-40, -2.5651e-40,
-5.2517e-40, 2.8983e-41, -1.3832e-40,
-1.4153e-01, 9.4023e-02, -9.8526e-02,
2.0678e-01, 4.0842e-01, -1.1853e-01,
-1.4108e-01, -1.1005e-01, -8.1274e-02,
3.4336e-41, 1.5625e-40, 2.7213e-40,
-5.3447e-40, -3.7330e-40, -3.3637e-40,
-4.3563e-40, -3.7094e-40, 1.2820e-41,
-8.1700e-02, -1.8215e-01, -1.6011e-01,
-1.4203e-01, 5.3791e-02, -3.7663e-02,
-1.1705e-01, -1.2604e-01, -8.4890e-03,
-6.1578e-02, -3.3907e-01, 2.2344e-03,
1.5060e-01, -1.9199e-01, -5.5274e-02,
6.2300e-02, 9.1084e-02, 1.3788e-02,
4.9025e-02, 3.3738e-01, -1.8104e-01,
-2.5051e-01, 8.2363e-02, 2.0325e-01,
5.6988e-02, -1.5118e-01, 6.8897e-02,
-4.6233e-40, 1.2244e-40, -3.9802e-40,
5.8530e-40, -2.4162e-40, 4.6793e-40,
-4.8362e-40, 3.3071e-40, 1.7094e-40,
3.5249e-40, -4.8579e-40, 1.9374e-40,
6.2372e-42, 5.8402e-41, 3.2851e-40,
6.1488e-40, 1.8086e-40, -5.2451e-40,
-3.0723e-40, -5.6704e-40, -5.9899e-40,
-3.5975e-40, -1.3818e-40, -2.7285e-40,
2.4468e-40, 8.3606e-41, 1.8818e-40,
-2.3749e-01, -2.7008e-01, -1.5222e-03,
1.4806e-01, 9.0783e-02, 2.7170e-02,
1.8706e-01, 1.8162e-01, -1.1799e-01,
-1.9852e-40, -4.8879e-40, -3.1971e-40,
-1.0245e-40, 9.1421e-41, 5.3018e-40,
2.2240e-40, -1.4666e-40, -4.4259e-40,
1.1835e-01, -2.7624e-01, 1.1446e-01,
1.3574e-01, 4.3109e-01, 1.3227e-01,
3.2554e-02, 1.7139e-01, -1.1988e-01,
3.5376e-02, 8.9191e-02, 6.7643e-02,
-8.2716e-02, 2.4178e-01, 6.0818e-02,
-6.7722e-02, -3.3712e-02, 3.0664e-02,
-6.6948e-02, 2.2886e-01, 1.8143e-01,
1.8636e-01, -2.4800e-01, 1.7185e-01,
-6.5479e-03, 1.8828e-01, -7.4464e-02,
-2.8281e-30, -5.8969e-31, -2.3180e-31,
-1.6163e-30, -3.8426e-31, -1.6788e-31,
-1.9412e-30, -4.1995e-31, -1.7651e-31,
-2.0525e-40, 4.6680e-40, 5.9108e-41,
1.0336e-40, -5.7226e-41, -6.1906e-40,
-1.8693e-40, 5.5777e-40, 6.0898e-40,
-3.4735e-41, -3.2674e-40, -2.3864e-41,
-3.3596e-40, 3.3107e-40, 1.0843e-40,
5.1103e-40, 6.0598e-40, -3.6267e-40,
-4.5583e-03, -1.0635e-01, -7.4962e-02,
-1.2741e-01, 2.7234e-01, 1.0508e-01,
-2.1207e-01, 9.6720e-02, 3.4641e-02,
1.1304e-12, 1.1614e-12, 9.7086e-13,
1.3361e-12, 1.3697e-12, 1.1286e-12,
1.2620e-12, 1.2938e-12, 1.0680e-12,
-8.4197e-02, 6.3834e-02, 2.3157e-02,
-2.1280e-02, 2.9074e-01, 8.5883e-02,
-1.3695e-01, -1.6047e-01, -4.5834e-02,
-1.3848e-01, -6.6090e-02, -7.7201e-02,
-5.1963e-02, 6.0643e-02, -4.9932e-02,
1.1779e-01, 1.7521e-01, 3.0366e-02,
4.7601e-03, 4.3941e-02, -3.5985e-02,
1.7692e-02, -2.3705e-01, 2.1062e-01,
7.7174e-02, -7.6616e-02, 2.0102e-02,
-3.6353e-06, -3.5534e-06, -3.2461e-06,
-3.6813e-06, -3.6196e-06, -3.3222e-06,
-3.5581e-06, -3.5179e-06, -3.2504e-06,
-7.3892e-11, -7.2930e-11, -6.8104e-11,
-7.9244e-11, -7.7770e-11, -7.2319e-11,
-7.7297e-11, -7.5673e-11, -7.0195e-11,
-1.5180e-10, -1.5027e-10, -1.4244e-10,
-1.6013e-10, -1.5761e-10, -1.4940e-10,
-1.5682e-10, -1.5395e-10, -1.4553e-10,
-9.1167e-02, 1.2374e-01, -3.8304e-02,
2.2641e-01, 2.4855e-01, -4.3174e-02,
1.4364e-01, 1.8438e-01, 1.1617e-02,
6.1925e-40, 3.3333e-40, 1.8962e-40,
3.2481e-40, -1.7566e-40, -3.0456e-40,
2.7654e-40, 3.8422e-41, 4.9191e-40,
7.5657e-02, -1.0697e-03, 3.0319e-02,
-4.7642e-02, -9.4454e-02, -2.6543e-02,
-5.3129e-02, -1.9667e-01, -1.0851e-01,
-8.5909e-03, 1.2177e-01, 2.6434e-01,
2.4468e-02, 5.0484e-02, 3.4698e-01,
-1.4764e-03, 3.7374e-02, 1.2658e-01,
2.0602e-02, -2.4624e-02, 1.3741e-01,
1.8641e-02, 4.0484e-01, 3.2976e-01,
-4.4809e-01, -3.2104e-03, 1.6290e-03,
8.1306e-41, 2.0311e-40, 2.9683e-40,
-5.7636e-40, 4.4291e-40, 4.3356e-40,
-7.1797e-41, 4.5366e-40, 3.9953e-40,
-4.5418e-40, 4.1805e-40, -3.2458e-41,
-9.4881e-41, -8.6365e-41, -1.9294e-40,
7.1954e-41, -9.8565e-41, -5.5540e-40,
-5.3769e-40, 1.4094e-40, -1.5355e-40,
8.8038e-41, -3.6848e-40, -1.2237e-40,
-2.8267e-41, -1.7583e-40, -5.9647e-40,
1.0929e-01, 2.9895e-02, -1.4923e-01,
-1.1234e-01, -1.0514e-01, -1.3280e-02,
2.2255e-01, 6.4152e-03, -1.6309e-02,
-1.5899e-40, -7.2549e-41, -2.6734e-40,
-3.3842e-40, 3.3255e-40, 4.2694e-40,
5.2940e-40, 3.2455e-40, -3.7081e-40,
6.3639e-02, -3.3720e-02, -2.3453e-02,
1.9477e-01, 5.2267e-02, 1.8565e-02,
1.6048e-01, 2.7636e-01, 1.5930e-02,
1.7673e-03, 6.3646e-02, -1.5127e-02,
-3.7787e-02, -1.4037e-01, -3.6231e-02,
-1.5636e-02, -7.8742e-02, -2.4137e-02,
-5.0748e-02, 6.5641e-02, -2.5353e-03,
8.4955e-02, 7.4231e-01, 1.3795e-01,
-1.4552e-01, 2.0869e-01, 4.0739e-02,
-2.0015e-41, 5.2988e-40, 2.7578e-40,
4.1051e-40, 1.2834e-40, -3.4898e-40,
-1.1975e-40, 4.2374e-40, -3.0404e-41,
-6.3014e-40, 4.6330e-40, -4.4141e-41,
2.5442e-41, 5.7456e-40, 2.3848e-40,
-1.0788e-40, -5.0563e-40, -5.3638e-41,
3.5728e-40, 1.9752e-40, 6.1004e-40,
2.8189e-41, -6.2151e-40, 1.1807e-41,
6.5305e-41, 5.2028e-40, 1.3692e-40,
6.4391e-02, -1.3079e-01, -3.7980e-02,
-3.2362e-01, -3.7239e-01, -8.0182e-02,
-2.6787e-01, -3.1240e-01, -1.2798e-02,
-1.2072e-40, 5.3996e-40, -3.4352e-40,
-8.0996e-41, -3.0208e-40, 3.1848e-40,
-5.6407e-40, 2.4674e-41, -2.1055e-40,
-9.2897e-02, 1.8040e-01, -4.3269e-01,
-7.6669e-02, 4.3554e-01, -4.4870e-02,
-2.3249e-02, -1.1805e-01, 1.0507e-01,
-5.2540e-02, -3.6856e-01, 1.1246e-01,
-2.3632e-02, 1.3165e-01, -1.5380e-02,
-1.1467e-02, -5.3754e-02, -4.1619e-02,
-1.5635e-01, 3.8584e-01, -1.4434e-01,
1.7523e-01, 3.7253e-02, 4.9784e-01,
5.8484e-02, -8.4711e-02, -7.7498e-02,
-1.6956e-40, 5.4293e-41, -2.5140e-40,
-3.1995e-40, -4.8337e-40, 2.5539e-40,
-1.1449e-40, 1.9503e-40, -1.7368e-40,
5.4753e-40, 5.9720e-40, -4.7821e-40,
3.8830e-40, -3.1984e-40, -2.7163e-40,
-5.3411e-40, 7.2638e-41, 4.3186e-40,
4.6654e-40, -5.9540e-40, -2.8155e-40,
-1.4801e-40, -1.6945e-40, 1.9723e-40,
5.8380e-40, -6.1587e-40, 3.3667e-40,
-2.9327e-02, -4.2746e-02, -1.5018e-01,
8.6354e-02, 2.8140e-01, 1.2970e-02,
-2.0755e-01, 6.7548e-02, -3.6049e-02
}
,
{
9.5728e-41, 5.3991e-40, -1.3764e-40,
-2.0389e-40, 2.4254e-40, 3.3492e-40,
6.5289e-41, -3.0842e-40, 5.5850e-40,
7.7599e-02, 2.5043e-02, -1.4099e-02,
-3.3184e-02, 5.6863e-01, -2.7001e-02,
-5.2659e-02, 5.4713e-02, 2.3991e-03,
2.2010e-02, -3.9120e-02, -1.1558e-01,
9.1633e-02, 1.3070e-01, 1.2489e-01,
-4.4040e-02, -1.6324e-02, -4.9631e-02,
-7.3548e-02, -2.0492e-01, 1.4043e-01,
-6.0411e-02, 5.7710e-02, -3.6840e-02,
1.3173e-02, 2.3215e-03, 1.1820e-02,
2.5772e-02, -1.3436e-01, -5.9285e-02,
-9.3983e-02, 1.1545e-01, 1.1602e-01,
-1.8505e-02, 6.1498e-02, -1.3097e-02,
9.8690e-03, -2.1338e-02, -1.2175e-01,
1.7936e-02, -2.7811e-02, 6.7037e-02,
-5.1401e-03, 7.6421e-02, -1.0794e-01,
4.6409e-02, 3.4701e-01, 2.6587e-02,
8.4175e-02, 5.2712e-01, 6.8999e-02,
-8.0756e-02, 1.9648e-01, -8.4639e-02,
1.2818e-01, 4.0660e-02, 7.6715e-02,
8.7991e-02, 4.6556e-01, -4.0025e-02,
2.1251e-03, -8.3784e-03, 5.9859e-02,
1.9835e-40, -3.4675e-40, -7.9692e-41,
-1.4304e-40, 2.3927e-40, -5.9796e-40,
3.8209e-40, -6.3260e-41, -9.2501e-41,
3.2007e-01, 1.5800e-01, -1.9594e-02,
-4.5315e-02, 1.0536e-01, -8.0692e-02,
2.1185e-01, -3.1418e-01, -1.5257e-01,
8.6294e-02, -1.3398e-01, -1.0694e-01,
8.6084e-02, -1.2393e-03, 1.7549e-02,
-1.5504e-01, -1.3112e-01, -3.5905e-02,
-3.8190e-01, 3.8393e-01, 1.6587e-02,
1.5002e-01, 1.9586e-01, -2.6260e-01,
-4.0159e-02, -8.2891e-02, -1.7761e-01,
-1.8611e-01, -1.1241e-02, -4.2538e-02,
-5.7898e-02, 2.4583e-01, 4.1590e-02,
2.4890e-02, 7.9409e-03, -2.7418e-02,
6.6194e-03, -4.2441e-02, -1.1167e-01,
-1.3236e-01, -7.9642e-02, -6.0623e-02,
-4.7198e-03, 5.6904e-02, 1.2651e-01,
1.2925e-01, -5.9162e-02, -9.1949e-04,
1.8668e-02, -2.6361e-02, -7.1042e-03,
-4.3178e-02, 2.6050e-04, 4.4799e-02,
7.9674e-02, 2.7656e-02, 7.1211e-03,
1.1463e-01, 1.0765e-01, 7.6066e-02,
-8.0780e-02, -5.4875e-02, 1.5209e-02,
-3.7365e-13, -3.7819e-13, -3.5929e-13,
-4.0298e-13, -4.0881e-13, -3.9033e-13,
-3.9409e-13, -3.9950e-13, -3.8277e-13,
-1.7847e-02, -1.7537e-02, -3.7313e-03,
2.6531e-02, 7.5951e-02, -4.0134e-03,
1.7387e-02, 6.0044e-02, -9.0211e-02,
2.7091e-02, 8.8333e-02, 1.0619e-01,
5.0470e-02, 1.2406e-02, 1.5503e-01,
-1.5936e-02, -2.2422e-01, -2.4640e-02,
-8.2430e-03, -1.4097e-02, -6.2474e-02,
8.0534e-02, 1.8603e-01, -3.1725e-02,
-3.1621e-03, 2.0362e-03, -1.4002e-01,
-7.3799e-03, 1.5881e-01, 6.7195e-02,
4.5946e-02, 2.4358e-01, 1.4677e-01,
-7.4788e-02, 6.7297e-02, 9.0735e-02,
-8.4553e-03, -1.1877e-02, 4.4209e-02,
-1.4281e-02, -6.8849e-02, -4.1386e-03,
3.2286e-02, 4.7128e-02, -1.2988e-02,
-2.2990e-02, -8.9265e-02, 6.4050e-02,
-2.3354e-02, 1.3846e-01, -1.6256e-01,
-6.5661e-02, -2.8983e-02, -4.3497e-02,
1.0597e-02, -2.3534e-02, -2.6068e-02,
-7.8812e-02, 1.9502e-01, 6.8938e-03,
3.2025e-02, 2.3353e-02, 4.9225e-02,
-5.0273e-40, 1.2403e-41, 5.8127e-40,
3.2777e-40, -3.5740e-40, 4.9781e-40,
-2.4198e-40, -4.6311e-40, 1.3330e-40,
-3.0803e-01, 1.7804e-01, 1.0604e-01,
4.1405e-01, 1.9740e-01, -5.3067e-02,
2.3738e-01, -1.6828e-01, 1.5338e-01,
6.6857e-03, 1.8623e-01, -1.2126e-01,
-1.6323e-01, -1.2719e-02, -1.7743e-01,
-1.3612e-01, -3.4442e-02, -1.0552e-01,
-1.4560e-01, 1.8771e-01, 8.4508e-02,
5.8732e-02, -2.2378e-01, 1.2673e-01,
3.0455e-03, 3.8438e-02, -6.2235e-02,
1.9951e-02, 2.6963e-01, -1.8594e-01,
-8.6550e-02, -1.3097e-01, -3.5032e-02,
2.0423e-02, -9.0499e-02, 1.7130e-01,
-1.8592e-01, 6.6808e-02, -1.5768e-01,
-6.4402e-02, -1.2265e-01, 6.8487e-02,
1.9899e-02, 9.3376e-02, 7.8577e-02,
-1.3384e-01, -7.6429e-02, 1.7142e-02,
-1.2385e-01, -1.1821e-01, -1.2716e-03,
5.3770e-02, 1.4973e-01, 1.4762e-01,
-4.7688e-02, -1.1733e-01, -1.5032e-01,
-2.0699e-01, -9.4949e-02, -2.6374e-02,
4.4489e-02, 1.8376e-02, -7.6844e-02,
1.8831e-40, -2.6056e-40, -4.7602e-40,
-3.4079e-40, 1.5054e-40, 1.2387e-40,
2.3040e-40, 1.4644e-40, 5.6365e-40,
-2.0809e-02, 5.3674e-03, 1.7057e-03,
2.4160e-01, 4.1348e-01, 3.5215e-02,
8.2154e-02, 2.0431e-01, 1.0366e-01,
-1.5149e-02, 1.0521e-01, -4.1706e-02,
-5.0651e-02, 2.3615e-02, -9.3860e-02,
-1.0823e-01, -6.3645e-02, -1.1573e-01,
-2.4116e-02, 1.3546e-02, -1.0298e-03,
1.2102e-02, 2.2630e-02, 1.1375e-01,
1.3966e-02, 1.0754e-01, 1.6621e-01,
1.6213e-02, 2.0816e-01, 8.9441e-02,
-7.5452e-02, 3.4580e-03, -3.3317e-01,
5.0917e-02, 1.3898e-01, -1.0723e-01,
6.0473e-03, 8.9741e-02, -6.8206e-02,
-7.1770e-02, -3.5661e-01, -2.8935e-01,
-1.6324e-02, 2.5728e-02, -1.1281e-02,
-1.3390e-01, -9.3090e-02, 4.3366e-02,
4.8620e-02, 1.4917e-01, 1.6295e-01,
2.4123e-03, -7.6347e-02, -8.0226e-02,
6.0740e-03, 3.7065e-02, 4.5518e-04,
-1.3793e-01, 2.3848e-01, -1.1199e-01,
1.0422e-01, 1.1214e-01, 3.3457e-02,
-3.2827e-40, 5.9135e-40, 3.3773e-40,
-5.8903e-40, -5.9439e-41, 1.9973e-40,
-3.6141e-40, -4.7563e-40, -1.0222e-40,
7.3457e-02, -8.2031e-02, -2.9504e-02,
-5.3420e-02, 4.9697e-02, 7.6779e-03,
2.1180e-02, 1.1069e-02, -1.1940e-02,
1.7302e-02, 9.9063e-02, 4.8847e-02,
4.9513e-02, 2.4240e-01, 2.7174e-01,
2.7487e-01, 1.9410e-01, 3.1165e-01,
-6.7532e-03, -1.1608e-01, -5.0876e-02,
1.2107e-01, 3.1073e-01, 7.1681e-02,
-1.1411e-01, -1.7902e-01, 7.8898e-02,
-2.0117e-02, 3.6394e-01, 1.4546e-01,
-8.0861e-03, -4.3956e-02, -1.3473e-01,
5.1519e-02, -3.1122e-01, -4.6847e-02,
5.0405e-02, -1.0611e-02, -1.0557e-01,
-4.4346e-02, -1.4505e-01, 5.3977e-02,
-2.6288e-01, 1.8247e-02, -1.1606e-01,
1.0706e-01, -9.3675e-02, 1.1757e-01,
-5.0440e-02, -1.1784e-01, -4.0599e-02,
1.9618e-01, 9.9370e-02, 8.2258e-02,
2.6762e-02, -5.0740e-02, -1.8302e-02,
5.3340e-02, 6.5710e-02, 6.1552e-03,
-7.2158e-02, -3.5563e-02, 8.2140e-02,
3.1534e-40, 3.6427e-40, 3.0437e-40,
4.2856e-41, -4.7870e-40, 5.6317e-40,
-2.4673e-40, -6.9736e-41, 8.1050e-41,
1.4544e-01, 8.2490e-02, -9.2349e-03,
2.6124e-01, 2.7494e-01, -5.4946e-02,
1.8233e-01, 1.2428e-01, -6.7498e-03,
9.7639e-02, -6.2085e-03, 4.8154e-02,
2.7379e-02, -1.8443e-01, 4.0402e-02,
1.8893e-03, -5.2282e-03, 6.7548e-03,
-1.6559e-01, 9.7901e-02, -1.1869e-01,
-2.1287e-01, 4.1023e-01, -9.7379e-02,
-1.3767e-03, -1.6343e-01, -9.5059e-02,
-1.3547e-01, 2.0094e-01, 1.0102e-01,
-2.1311e-01, -1.5088e-01, 1.8175e-01,
4.6946e-02, -1.3963e-01, 1.0220e-01,
1.7536e-01, -2.4758e-01, -1.1481e-02,
6.1596e-02, -4.0352e-01, -1.4348e-01,
3.1690e-02, 1.7240e-01, 7.0780e-02,
9.9953e-02, -1.4154e-01, -8.3038e-02,
1.4527e-01, -2.1430e-01, -7.5840e-02,
1.6146e-01, 3.7508e-02, 5.3833e-02,
1.6723e-01, 1.7113e-01, -4.8512e-02,
2.1319e-01, 4.7031e-01, 1.1570e-01,
2.0330e-01, 2.4636e-01, 6.9924e-02,
-2.1165e-40, -1.9259e-40, -5.0990e-41,
-7.1298e-42, -4.2590e-41, 3.1709e-40,
4.1065e-40, -4.2585e-41, 3.4243e-40,
-1.0338e-40, 4.6039e-40, -3.3818e-40,
-3.9589e-41, 5.9574e-40, -5.8014e-41,
1.4505e-41, -3.5326e-40, -3.9806e-40,
4.2423e-40, -1.7055e-40, -4.9666e-40,
2.2853e-40, -2.4684e-40, -1.3794e-40,
-5.6764e-40, -1.7905e-40, -5.8915e-40,
-1.4755e-27, -2.0405e-28, -4.8677e-30,
-7.1151e-28, -9.7603e-29, -3.5264e-30,
-2.7455e-29, -5.7734e-30, -2.8633e-31,
-5.9960e-06, -5.9595e-06, -5.8686e-06,
-6.0381e-06, -6.0191e-06, -5.9605e-06,
-5.9849e-06, -5.9981e-06, -5.9654e-06,
-4.8277e-22, -7.0529e-22, -8.7179e-22,
-4.6334e-22, -6.3505e-22, -8.8438e-22,
-3.3883e-22, -4.2421e-22, -5.9002e-22,
-2.9574e-40, 4.0860e-40, -1.5966e-40,
-6.7527e-41, 7.6661e-41, -5.9491e-40,
3.0843e-40, 8.1079e-41, -2.5140e-40,
-3.7315e-40, 9.4787e-41, 4.6794e-40,
1.9383e-40, 5.0336e-41, 3.0561e-40,
-5.4286e-40, 5.5999e-40, -4.6977e-40
}
,
{
-1.7778e-01, 5.2351e-03, 1.6035e-02,
-9.7482e-02, -1.1056e-02, -5.0999e-02,
1.7460e-01, -4.0005e-02, -5.0911e-02,
-9.3843e-02, 1.2640e-01, -1.5016e-02,
-5.2880e-01, 1.9469e-01, -9.0037e-02,
-8.9136e-02, 9.8632e-02, -1.5009e-01,
-1.8080e-01, 1.1396e-01, -2.6178e-02,
-1.6689e-02, 1.4132e-01, -6.7769e-03,
-2.1120e-02, 6.8616e-02, -7.8209e-02,
4.8237e-02, -2.5303e-02, 1.7882e-02,
-4.2852e-02, -1.5071e-02, -3.3818e-02,
1.3635e-01, 4.5330e-01, 2.1489e-01,
2.7362e-02, -7.4152e-02, 2.3185e-03,
1.8771e-01, -2.0827e-02, -7.5581e-02,
1.4675e-01, -6.5552e-02, 4.2292e-02,
1.3990e-01, -4.1598e-01, 2.1609e-03,
1.5997e-01, 1.1375e-01, -1.8272e-02,
1.9045e-02, -4.2702e-02, -2.5602e-02,
1.6432e-01, -1.2783e-01, -1.8285e-03,
2.9414e-01, 1.7401e-01, -2.6321e-01,
-1.0125e-01, 1.3565e-01, 1.5894e-02,
-3.7351e-40, 6.3010e-40, -1.2071e-40,
-4.6380e-40, 1.8442e-40, -3.5994e-40,
-2.1459e-40, -4.3455e-40, -6.1978e-41,
-2.3638e-40, -4.6965e-40, -3.4232e-40,
-1.6517e-40, 4.7178e-40, -1.6757e-40,
6.7890e-41, -4.3000e-40, 1.8323e-40,
4.5416e-40, -2.9010e-40, -1.5200e-40,
-3.5533e-40, -8.7351e-41, 6.5595e-42,
5.1625e-40, -6.0418e-40, -2.7846e-40,
-2.1861e-10, -2.2422e-10, -2.1298e-10,
-2.2653e-10, -2.3500e-10, -2.2512e-10,
-2.1802e-10, -2.2681e-10, -2.1608e-10,
-3.2862e-40, 3.4241e-40, -1.3264e-40,
2.8762e-40, 1.3843e-40, 3.0949e-40,
-3.7702e-40, 2.6194e-40, 2.1451e-40,
-3.2283e-40, -5.5487e-40, 5.8744e-40,
1.6124e-40, 3.3512e-40, 3.1454e-40,
-3.5417e-40, -5.7692e-40, 5.5184e-40,
3.5641e-40, -4.3187e-40, -3.5314e-40,
4.9246e-40, 5.9593e-40, 8.3132e-41,
-2.3841e-40, -5.6196e-40, -3.2230e-41,
4.3824e-40, -3.8344e-40, -9.9086e-42,
-2.9323e-40, 2.1916e-40, 4.4739e-40,
5.6837e-41, 5.1796e-41, -2.4338e-40,
-2.2853e-40, -3.8920e-40, 6.1587e-40,
-2.9474e-41, 4.6214e-40, -3.6292e-40,
-1.4928e-40, -3.6708e-41, 5.2020e-40,
-1.2983e-12, -2.6539e-12, -1.9817e-12,
-6.5613e-12, -1.0255e-11, -6.6919e-12,
-8.3217e-12, -1.7832e-11, -1.1086e-11,
-4.9138e-40, -9.0061e-42, 4.6251e-40,
-2.9970e-41, -2.5468e-40, -3.5660e-40,
2.5450e-40, -9.5634e-38, -3.2369e-32,
-1.0233e-06, -8.2108e-07, -1.1668e-06,
-5.9592e-07, -3.9529e-07, -5.7435e-07,
-6.0253e-07, -3.8785e-07, -4.9365e-07,
-8.9372e-37, -2.1590e-36, -2.1060e-40,
-1.5666e-35, -1.1466e-38, -2.3366e-40,
-5.4077e-38, 5.0487e-40, -3.3736e-40,
-1.5357e-13, -8.4607e-14, -1.9206e-16,
-5.5373e-13, -3.0787e-13, -1.0513e-15,
-1.0468e-13, -8.6069e-14, -2.2453e-16,
-4.7501e-14, -1.3426e-13, -1.1133e-13,
-1.3801e-14, -2.4024e-14, -3.5120e-14,
-1.9817e-17, -1.3229e-17, -3.2854e-17,
-1.4365e-18, -4.1143e-15, -9.2614e-14,
-1.1174e-19, -1.6235e-15, -1.5600e-13,
-1.2643e-21, -3.9578e-17, -1.2038e-14,
-2.9789e-40, -4.6452e-40, 1.5649e-40,
-1.8445e-40, -5.2942e-40, 2.5130e-40,
6.2269e-40, 3.9166e-41, -2.4197e-40,
9.0835e-02, -5.2035e-03, -2.5980e-02,
-1.0090e-01, -7.4167e-02, 1.3364e-01,
1.0302e-01, -1.5250e-01, 1.2417e-01,
4.7205e-02, -2.3839e-01, -1.4983e-02,
5.6824e-02, -1.8259e-02, 9.6426e-02,
5.9740e-03, -1.4198e-01, -2.1076e-01,
-1.5837e-01, 6.4749e-02, -2.1417e-01,
-3.4048e-02, 4.9638e-01, 2.0984e-03,
-1.4335e-01, 4.8295e-02, -9.2209e-02,
1.9450e-01, -1.3603e-01, 1.2008e-01,
1.6803e-01, 5.6805e-02, 1.1518e-01,
5.9320e-02, -3.8200e-02, -1.1340e-01,
-8.6877e-02, 1.1533e-01, -4.9870e-02,
-7.2811e-03, 2.5730e-01, -1.8536e-01,
-6.4965e-02, 1.0364e-01, 1.3706e-02,
4.6974e-02, -1.0049e-01, -1.7460e-01,
-1.7910e-01, 3.0771e-01, -2.5757e-01,
-2.2846e-02, -3.7491e-03, -5.2171e-03,
-4.7762e-02, -4.7776e-02, 5.1125e-01,
-2.0210e-01, 6.4815e-02, -6.1606e-02,
7.3686e-04, -1.6226e-01, -3.0327e-02,
5.6501e-40, 5.2828e-40, -5.9773e-40,
-4.3530e-40, -1.1658e-40, 4.9705e-41,
4.8101e-40, 5.0236e-40, 2.0476e-40,
-1.1412e-01, 1.3391e-01, -1.2279e-01,
1.4370e-01, 3.7617e-01, 7.1407e-02,
6.9661e-02, 3.1963e-01, -1.7089e-02,
-4.7530e-02, 6.5411e-02, -2.4915e-02,
3.3429e-02, -1.3899e-01, -3.3875e-02,
-1.9261e-02, -1.3162e-01, 1.1415e-01,
2.0599e-02, -3.8667e-02, -7.2190e-02,
-2.1112e-01, -1.6525e-01, -2.3430e-02,
-1.2287e-02, -2.6637e-01, 1.0859e-03,
-2.8564e-02, 4.8846e-02, 4.2412e-02,
1.4632e-01, 1.5974e-02, -1.0699e-01,
5.5661e-02, -2.0952e-01, 2.4151e-02,
-2.3510e-02, -5.0570e-02, 1.0799e-01,
1.7495e-01, -1.5788e-03, -1.6447e-02,
7.7642e-02, -9.3888e-02, 1.3891e-03,
2.2658e-02, 1.4058e-01, 1.0639e-01,
-5.5626e-02, -3.0794e-01, -5.7160e-02,
1.0874e-01, -8.3907e-02, 4.2106e-02,
1.7688e-02, 1.8090e-01, -2.1718e-03,
-1.0659e-02, -2.1302e-01, 1.0056e-01,
-6.0693e-02, -2.3624e-02, 6.3688e-03,
-2.7320e-40, -1.3336e-40, 2.4202e-41,
-7.1225e-41, 1.2848e-40, 1.5426e-40,
-4.2798e-40, 6.5075e-41, 6.2629e-40,
1.6905e-01, -1.7379e-01, -2.1360e-02,
-2.9396e-01, 1.1782e-01, 7.9111e-02,
-6.4767e-03, -1.9949e-01, 5.4243e-02,
-3.2753e-02, -1.5810e-01, 5.2257e-02,
-1.8133e-02, 2.0548e-01, -2.8071e-01,
-5.3725e-02, 8.4067e-02, -7.4639e-02,
8.9137e-02, -2.3078e-01, -1.9626e-01,
3.1276e-01, 1.5332e-01, -1.9590e-01,
-1.8318e-02, 6.8460e-02, 9.1476e-03,
8.2398e-02, 8.5883e-03, 7.6830e-02,
-1.4580e-01, 4.6253e-01, -3.1900e-01,
-1.1051e-01, 6.3807e-02, -2.5130e-02,
-1.2029e-01, -3.8982e-03, 2.1654e-02,
-3.2017e-01, 2.0265e-01, -1.7311e-01,
-1.3229e-02, 1.3805e-01, -6.2689e-02,
-3.6619e-02, -1.9366e-01, 2.7177e-01,
5.5937e-02, 7.9713e-02, -2.3872e-01,
-3.9690e-02, 2.2914e-02, -1.7779e-02,
1.1110e-01, 1.6618e-01, 3.6139e-01,
7.9777e-02, 4.3655e-01, 3.0597e-01,
-5.5125e-02, 6.1229e-02, 1.2414e-01,
2.1644e-40, 7.2343e-41, 5.5580e-40,
-4.3927e-40, 5.0561e-40, -1.5560e-41,
-3.2783e-40, -8.8219e-41, 5.4415e-40,
-6.7176e-02, -3.4930e-02, -2.7087e-02,
1.0489e-01, 2.1178e-01, -1.6752e-01,
-1.2627e-01, -2.4207e-01, -7.4667e-02,
-3.1470e-02, -1.3365e-02, 8.7742e-02,
-2.2809e-02, -4.7991e-01, 2.4740e-02,
6.4418e-02, 3.4818e-02, -2.9275e-01,
-2.8830e-01, -7.0458e-02, 7.8922e-02,
-1.4436e-01, 4.1068e-02, 6.2896e-02,
4.1061e-03, 2.1844e-01, 9.0488e-02,
-1.1085e-01, 8.3761e-02, 3.2634e-02,
3.2470e-01, -2.7760e-01, 4.1235e-02,
8.6625e-02, 2.6816e-01, -1.3560e-01,
3.8789e-01, 3.2406e-01, 1.0631e-01,
7.5131e-02, -2.0206e-01, 1.3027e-01,
4.0382e-02, 2.4350e-01, -3.6042e-03,
-1.0063e-01, 1.9418e-01, -7.7039e-02,
9.4531e-03, 7.1605e-02, 1.4004e-01,
-2.0591e-02, 4.5944e-02, -2.6721e-03,
-3.4665e-03, 2.2560e-01, -8.2930e-02,
-1.5507e-01, 2.7206e-01, -2.8665e-02,
-3.4909e-03, 1.7696e-02, -8.5492e-02,
2.1541e-40, -3.3029e-40, 1.7678e-40,
-3.9857e-40, -1.1965e-40, -8.6754e-41,
-4.0721e-40, 2.2073e-41, 4.2728e-40,
-1.0496e-02, 5.4120e-02, -1.6498e-02,
-5.9387e-02, 2.3757e-01, -8.0381e-02,
2.3739e-02, -1.3715e-01, -3.0906e-02,
-8.5760e-03, 2.4518e-02, -6.9090e-02,
2.1623e-02, 8.9641e-02, 9.9031e-02,
-1.0052e-02, 4.6506e-02, -1.5756e-01,
8.5003e-02, -3.6434e-03, 1.3816e-02,
9.0532e-02, 2.3661e-01, 1.8077e-01,
2.8120e-02, 4.3753e-02, 2.2981e-02,
3.5830e-02, 5.7995e-02, -5.6879e-03,
3.7708e-02, -2.6373e-01, 2.0886e-01,
-4.0632e-02, 1.6891e-01, -6.8996e-02,
-1.1972e-01, -4.3628e-02, 2.0278e-02,
-1.4818e-01, 4.0844e-02, 1.5917e-01,
-4.5684e-02, 1.4075e-01, -2.0784e-02,
-1.1533e-03, -2.7897e-01, -8.8707e-02,
-1.7907e-02, 1.8400e-01, 1.1026e-01,
-2.3183e-03, 6.3875e-02, -4.2394e-03,
3.2021e-02, -8.8955e-02, -2.2298e-02,
8.1353e-02, 3.3079e-01, -2.0616e-01,
-3.5802e-02, 4.9804e-02, -9.2712e-02,
-1.5940e-07, -1.6158e-07, -1.5812e-07,
-1.6273e-07, -1.6555e-07, -1.6260e-07,
-1.5867e-07, -1.6192e-07, -1.5975e-07
}
,
{
-1.5080e-02, 1.1294e-01, 7.1187e-02,
1.1628e-02, -8.4938e-01, 8.5457e-02,
-3.9642e-02, -2.3879e-02, 1.0029e-02,
2.6648e-40, 9.1590e-41, 3.3285e-40,
-3.3445e-40, -2.5194e-40, -2.0946e-40,
3.6800e-40, -1.1584e-40, 6.2195e-40,
-1.3560e-41, -8.0151e-41, 4.4048e-40,
-4.1209e-40, 2.7411e-40, 3.2419e-40,
5.8333e-40, 1.1503e-40, -5.0783e-40,
-5.5301e-02, -2.4971e-02, 4.9251e-02,
-2.5589e-01, 1.6560e-01, -8.0956e-02,
4.0518e-01, 3.1320e-02, -1.4262e-01,
1.2250e-02, 5.1989e-02, 3.0706e-03,
-7.9534e-02, -1.9801e-01, -2.7791e-02,
2.1768e-01, 6.9978e-02, -4.2325e-02,
-1.9165e-02, -2.1179e-02, -2.1558e-02,
3.6816e-01, -5.2929e-02, 9.5790e-02,
2.8095e-01, -1.4731e-01, 3.4182e-02,
2.3702e-02, 4.0764e-02, 3.5767e-02,
-8.4586e-02, 1.9025e-01, -1.6794e-01,
-1.0273e-02, 3.2259e-01, -1.5841e-01,
2.6794e-01, 5.2084e-02, 1.2761e-02,
-1.1169e-01, -1.7808e-01, 1.1363e-01,
-1.3808e-01, -1.7764e-02, -1.7420e-02,
1.5840e-02, -2.3405e-01, 7.6361e-03,
-6.6082e-02, 7.9778e-02, -2.0423e-01,
-1.9594e-02, -6.3370e-02, 3.3351e-02,
-2.0396e-40, -3.0207e-40, -3.2364e-40,
2.3575e-40, 5.8301e-41, -3.7432e-40,
-3.6291e-40, 3.3441e-40, 1.4574e-40,
-4.3792e-40, -2.5814e-40, -3.4986e-41,
-3.4920e-40, -4.4757e-40, 3.2192e-40,
4.7222e-40, -7.3197e-41, -3.4635e-40,
5.1495e-02, 7.8843e-02, 4.2243e-02,
-2.1245e-01, 1.9568e-01, 7.9369e-03,
2.2795e-02, 2.2801e-02, 7.6895e-02,
3.0044e-01, -1.4041e-01, -2.3677e-02,
-1.1656e-01, -7.5113e-02, 1.0625e-02,
-1.2133e-02, 5.0658e-02, -7.2944e-02,
-3.3652e-02, -2.0452e-01, -4.1048e-02,
2.8531e-01, 1.2116e-01, -2.1526e-02,
-2.4564e-01, -4.1870e-02, -5.5819e-02,
-2.3157e-01, -2.5594e-02, 1.1154e-01,
2.1234e-01, 3.2762e-01, -2.9000e-01,
1.8591e-02, -5.9820e-02, -9.0807e-02,
-3.0027e-01, -1.8370e-01, 1.2086e-02,
2.1178e-02, 2.9559e-01, 1.2966e-01,
6.8542e-02, 7.7710e-03, -6.0304e-02,
3.3019e-03, -1.9135e-02, 9.3227e-03,
-9.9003e-03, -1.0101e-01, -3.3513e-01,
-8.4091e-03, -1.5918e-02, -3.4323e-02,
3.8770e-40, -2.8639e-40, 4.6953e-40,
4.2631e-40, 6.2568e-41, -5.3500e-40,
-2.1987e-40, 1.3435e-40, 4.4101e-40,
-3.9973e-40, 6.3046e-40, 1.6046e-40,
4.4338e-40, 1.6940e-41, 4.1598e-40,
2.6132e-40, -2.9888e-40, -7.5708e-41,
-1.5991e-02, 8.2749e-02, -6.3776e-02,
-3.2220e-03, 4.1443e-02, -8.1219e-02,
-1.1231e-01, 6.7586e-01, -1.7600e-01,
-4.0371e-02, -7.9044e-02, 1.2451e-01,
4.1907e-02, -8.8159e-02, -1.1229e-01,
-4.0654e-03, -4.4087e-03, 1.2942e-01,
9.3318e-03, -6.5085e-02, 1.0165e-02,
-2.8758e-02, -4.9997e-02, 4.6069e-02,
4.2107e-04, 2.1718e-01, 3.1080e-03,
-9.1277e-03, -2.8568e-02, 1.6202e-02,
-8.2490e-03, 1.2888e-01, -1.3159e-01,
1.6065e-02, 4.0143e-02, 2.7043e-01,
-3.4809e-02, -8.1302e-03, 6.0786e-02,
5.1845e-02, 4.6995e-01, -1.0392e-02,
2.3359e-02, -1.8364e-01, -3.7343e-01,
-8.2996e-02, 9.7724e-02, -6.1012e-02,
2.8225e-02, 8.8706e-02, 1.3443e-02,
3.7515e-03, 1.7772e-02, 6.5945e-03,
-7.3847e-12, -7.5629e-12, -6.9337e-12,
-7.6292e-12, -7.8624e-12, -7.2877e-12,
-7.0582e-12, -7.3197e-12, -6.8467e-12,
1.5445e-11, 2.0754e-11, 2.0524e-11,
2.1239e-11, 2.5909e-11, 2.5983e-11,
2.0986e-11, 2.5190e-11, 2.2478e-11,
-4.7164e-02, -2.4754e-02, -1.8256e-02,
1.0526e-01, -4.6010e-03, -2.2784e-02,
-5.2028e-02, -1.6408e-01, 7.9112e-03,
-8.1863e-02, 4.2772e-02, -9.9446e-04,
-5.5521e-02, -1.1264e-01, -4.5782e-02,
-1.1026e-01, 2.1443e-02, -4.5120e-02,
-1.4141e-02, -2.8116e-03, 2.6990e-02,
-2.0201e-01, 4.3214e-01, 2.9373e-02,
-2.1768e-01, -2.7230e-02, 5.5396e-03,
5.0196e-02, 1.5506e-01, -5.7328e-02,
4.8323e-02, 3.8243e-02, -1.3533e-01,
-9.8862e-03, -5.6971e-02, -7.1500e-02,
1.0272e-01, 7.4686e-02, 7.4732e-02,
8.3744e-02, 1.5834e-01, 2.9221e-02,
6.5641e-02, 7.7697e-02, 3.5746e-02,
-1.6614e-01, -2.3128e-01, 4.4691e-02,
6.3546e-02, -3.8105e-01, 3.4110e-02,
-3.5022e-02, -2.3782e-02, 2.8664e-02,
-3.8813e-41, -2.8626e-40, -9.0218e-41,
4.1216e-40, -4.4215e-40, 3.1198e-40,
5.6281e-40, 2.0477e-40, 2.7797e-40,
-4.4903e-40, -6.2574e-41, 4.9971e-40,
5.0135e-40, -3.1945e-40, -2.4694e-40,
2.6587e-40, -4.9583e-40, -4.9771e-40,
3.7139e-02, 5.2936e-04, -2.3658e-02,
-3.6199e-01, -5.1912e-02, -5.1969e-02,
2.5415e-01, 2.4109e-01, 9.8721e-03,
5.5061e-02, -4.7469e-02, 3.0045e-02,
2.1565e-03, -2.3866e-02, -2.3496e-02,
6.0892e-02, -4.6442e-04, -5.0200e-02,
5.4971e-02, -1.7234e-02, -3.2759e-03,
4.8225e-01, -1.1234e-01, 3.8257e-02,
5.2105e-02, -2.8473e-03, -1.0355e-02,
-9.5654e-03, -1.8751e-01, 1.7079e-02,
7.0133e-02, 7.6363e-01, -8.7388e-02,
-5.6536e-02, -1.9152e-01, -1.6043e-01,
2.0359e-01, 7.4214e-02, 3.1970e-02,
-1.8199e-01, -1.9386e-01, -2.5967e-03,
-3.4609e-02, 3.3870e-02, 5.8835e-02,
8.8220e-02, 9.9265e-02, 7.1240e-03,
-9.1395e-02, -3.1699e-01, -2.9120e-02,
-1.8436e-02, -2.1432e-02, -4.5465e-02,
-3.2013e-40, 3.2019e-40, 4.8747e-41,
2.6585e-40, 6.1463e-40, 1.4176e-40,
-1.5286e-40, 3.0543e-40, 7.2032e-41,
-6.0758e-40, -3.6200e-40, 1.2123e-40,
1.3627e-40, 3.2983e-40, 3.6171e-40,
-4.2148e-40, 1.1102e-40, 3.2714e-40,
-3.4763e-02, -3.1632e-02, 3.0044e-02,
-2.0935e-01, 1.3533e-01, -9.1607e-03,
-1.5931e-01, 1.0771e-01, -6.6518e-02,
2.4399e-02, 2.2923e-03, 5.1575e-02,
-1.4154e-01, -1.0013e-02, -7.5696e-02,
1.0849e-01, 1.2575e-01, -7.3161e-02,
-1.5217e-02, -2.7659e-02, -3.1401e-02,
3.4960e-01, 7.2390e-02, 2.0722e-02,
3.9440e-01, 9.1821e-04, 1.7842e-02,
-1.5670e-02, 5.3020e-02, 6.0536e-02,
-1.8853e-01, 2.7532e-01, -1.9681e-01,
8.3258e-02, 9.4285e-02, -1.2695e-01,
2.7593e-01, 1.1456e-01, 1.6048e-02,
-5.1675e-01, 1.4727e-01, 7.5170e-02,
-6.9143e-02, -9.2948e-02, 3.4687e-02,
1.4128e-02, -7.9962e-02, 8.0446e-02,
3.7011e-02, -1.3400e-01, -2.0725e-02,
-6.4981e-03, 7.0724e-02, 6.6167e-02,
-4.5940e-41, 2.5437e-40, -3.3111e-40,
5.9661e-40, 6.2521e-40, 5.6418e-40,
1.9187e-40, -5.8872e-40, 5.5747e-40,
-1.6402e-11, -2.2097e-11, -1.7224e-11,
-2.2755e-11, -2.9977e-11, -2.1231e-11,
-1.3688e-11, -1.7479e-11, -1.3081e-11,
6.4790e-03, -3.8464e-03, -1.0008e-02,
-2.6001e-02, -7.9483e-02, 3.3711e-02,
2.6659e-03, -3.2634e-02, 1.0767e-02,
4.9939e-03, 1.4064e-02, -3.4294e-02,
4.8529e-02, 6.3386e-01, -3.6805e-02,
-1.3703e-01, 2.5878e-02, -4.8617e-02,
3.2186e-02, 6.6382e-02, 1.9305e-02,
7.0196e-02, -1.6892e-01, -2.8980e-02,
9.7762e-02, 9.7998e-03, -5.1620e-03,
5.0753e-02, -4.5071e-03, -3.9836e-02,
-6.0381e-02, -9.2016e-02, 9.5433e-02,
-1.0045e-02, 8.7955e-03, 4.9429e-02,
-1.8363e-02, -1.1912e-01, 9.7347e-03,
-1.5657e-01, -2.1035e-01, -4.9737e-02,
-3.0025e-02, -6.4959e-02, -5.6107e-02,
3.2927e-40, 5.7263e-40, 6.2889e-40,
-6.0716e-39, 5.3050e-41, -1.7152e-40,
-3.2493e-38, -1.5841e-40, -1.9343e-40,
4.9763e-40, 5.5142e-40, -4.3462e-40,
-2.2649e-40, 1.4321e-40, -2.6779e-40,
2.3072e-41, 5.4080e-40, -6.4200e-41,
2.2827e-40, -5.4515e-41, -4.1768e-40,
3.9033e-40, 6.1988e-41, 5.9877e-40,
-4.3355e-41, -5.1088e-40, 5.9845e-40,
-4.8238e-40, -1.8586e-40, 4.8699e-40,
-9.7225e-41, 4.3387e-40, -4.3683e-40,
-7.9278e-41, -5.3614e-40, 2.1911e-40,
-3.3982e-40, -5.3335e-40, 3.8540e-40,
1.9051e-40, -2.0840e-40, 2.2868e-40,
-3.5020e-40, -3.4276e-40, 2.7395e-42,
3.9197e-40, 6.1843e-40, -1.5888e-40,
4.3516e-40, -6.1852e-40, -5.3692e-40,
-4.3268e-40, 3.5154e-40, 3.4477e-40,
-4.8414e-40, 2.2647e-40, -2.5591e-40,
4.6326e-40, -3.0462e-40, 4.7817e-40,
-4.9853e-40, -5.3425e-40, -2.9848e-40,
-1.3329e-07, -1.3784e-07, -1.3049e-07,
-1.3376e-07, -1.3905e-07, -1.3204e-07,
-1.2479e-07, -1.2994e-07, -1.2410e-07
}
,
{
-2.5964e-02, 2.9670e-02, 1.2100e-01,
-3.0371e-02, -1.5277e-02, -1.8589e-01,
-1.8650e-02, -1.2852e-01, -6.6297e-02,
9.7934e-04, -5.1835e-02, -1.0278e-03,
-1.2336e-02, 2.2130e-01, -1.2373e-01,
-2.3451e-02, 3.4217e-02, -1.0118e-02,
-3.0558e-01, -8.5390e-02, -1.4360e-02,
1.2473e-01, -1.7005e-02, -3.6816e-02,
-8.9125e-02, -6.1400e-02, -2.0623e-02,
1.3736e-02, 1.2441e-02, -4.3491e-02,
6.4806e-02, 3.7012e-01, 3.8064e-02,
-1.3731e-02, -2.4859e-01, -2.5450e-01,
-6.5111e-03, -1.4271e-01, -5.0481e-02,
5.3240e-02, -3.4843e-02, -2.2703e-02,
3.7414e-02, 1.0334e-01, -7.2237e-02,
1.4216e-02, 3.4231e-02, -2.0890e-02,
2.7879e-02, 1.3717e-01, 4.5864e-03,
3.0460e-03, -1.1734e-01, 4.4439e-02,
6.4825e-03, 1.6324e-02, 1.4928e-02,
-8.8420e-02, -1.0779e-01, -9.0653e-02,
3.1086e-02, -2.9067e-02, -8.8488e-02,
-1.6779e-40, -6.3646e-41, -6.2486e-40,
2.3154e-40, 2.8049e-40, 3.7718e-40,
-3.3950e-40, -3.1501e-40, 5.8709e-40,
2.1435e-02, -4.3732e-01, 1.5520e-02,
3.4080e-02, 1.9912e-01, -8.1413e-02,
-3.2816e-02, 5.7844e-02, 8.9258e-03,
-1.1662e-02, -1.1721e-02, 4.3033e-02,
5.2135e-02, -2.2503e-01, 2.3941e-01,
3.8400e-02, 1.8075e-01, -1.4776e-01,
2.6784e-01, 2.2817e-01, -3.0553e-03,
-6.7998e-02, -1.2050e-01, 1.4714e-02,
2.4045e-02, -1.4329e-02, -1.6705e-02,
-1.1421e-02, 4.2139e-02, 4.2944e-02,
1.8809e-02, -2.5221e-01, 9.7562e-02,
-4.1600e-02, 4.0069e-03, 7.5290e-02,
-2.0092e-02, 2.3537e-01, 2.4356e-02,
3.1957e-02, -4.8573e-02, 2.9379e-02,
6.4562e-03, -1.1527e-01, -9.1223e-02,
-2.3432e-02, 5.2881e-02, -7.3239e-02,
-3.7048e-02, -2.1481e-01, 5.9801e-05,
-4.2646e-02, -1.8366e-02, -1.0681e-01,
-1.3366e-01, -1.7123e-01, -3.5629e-02,
1.1216e-01, 1.1479e-01, 9.5297e-02,
2.4728e-02, -7.3135e-03, -3.4373e-02,
-2.3917e-40, -4.1869e-41, 3.7775e-41,
2.8931e-40, -9.4850e-41, 2.5694e-40,
3.3549e-40, -2.4334e-40, -5.5933e-41,
-2.0900e-02, 2.1203e-02, -4.7169e-02,
2.3632e-02, -7.1148e-01, 4.9722e-02,
-7.8963e-03, 5.0689e-02, 2.2619e-02,
-4.7364e-03, 3.2037e-02, 1.1004e-02,
-4.3001e-03, 2.5245e-01, 5.9112e-02,
2.8932e-02, -1.1267e-01, -2.3739e-01,
-6.5379e-02, 5.2462e-03, -1.6807e-02,
1.0960e-01, 1.7943e-01, -6.3043e-03,
9.3102e-02, 7.3103e-02, 2.5259e-02,
5.6835e-02, 4.0467e-02, 2.5447e-03,
9.4599e-02, 2.5222e-01, 6.9855e-02,
4.4758e-02, 1.8073e-01, 1.5075e-01,
2.0329e-02, -4.9412e-02, 2.0663e-02,
-7.1648e-03, 1.4986e-01, 2.1212e-01,
2.7657e-02, -6.8660e-02, 1.7321e-02,
1.0629e-02, -1.0722e-02, 2.8247e-02,
-1.1303e-02, 1.0076e-01, -4.0592e-01,
2.6744e-02, 7.3650e-02, 5.7966e-02,
2.8122e-02, -7.5961e-02, -9.4797e-03,
-1.3010e-01, -5.4184e-01, -1.3619e-01,
-1.8661e-03, -1.4357e-01, 7.9520e-03,
-1.3538e-09, -1.6580e-09, -1.7289e-09,
-1.2386e-09, -1.5132e-09, -1.5987e-09,
-1.1157e-09, -1.3420e-09, -1.4090e-09,
1.5441e-02, -1.8142e-01, -8.6802e-02,
-4.0983e-02, 2.4351e-01, -5.8181e-02,
-2.9568e-02, 3.9561e-03, 3.4181e-02,
-2.9210e-02, 2.5403e-02, 9.1331e-02,
2.3621e-02, 2.3954e-01, 5.2487e-02,
1.6509e-02, -6.2728e-02, 1.3448e-02,
1.2855e-01, 1.1892e-02, -1.3356e-02,
1.0810e-01, 1.6760e-01, -3.2040e-02,
6.2209e-02, 4.0682e-02, 3.9772e-02,
-6.1711e-03, 5.0588e-02, -1.0811e-01,
1.5744e-02, 1.6091e-01, -6.1739e-02,
-5.6717e-02, -1.0657e-02, -3.7943e-02,
-4.0595e-02, 8.0149e-02, 2.0216e-02,
3.8838e-02, -6.3586e-01, 2.3785e-01,
-1.0472e-02, 6.3899e-02, -8.2184e-02,
-1.9137e-02, 8.1163e-02, 6.7065e-02,
-2.2377e-03, 1.1860e-01, 3.4122e-02,
1.0501e-02, 2.9851e-02, 7.5841e-02,
5.8970e-02, -1.2188e-01, 7.7982e-02,
-2.6516e-02, -4.1289e-01, 2.1471e-02,
3.3957e-02, 3.5762e-02, -5.7857e-02,
-2.7357e-30, -3.4780e-30, -3.0306e-30,
-1.5188e-30, -1.9888e-30, -1.8755e-30,
-7.7431e-31, -9.7571e-31, -9.7402e-31,
-1.8497e-02, -2.4554e-02, 1.4428e-01,
1.4217e-02, -2.3647e-01, 8.4097e-02,
-1.0251e-02, -4.2137e-03, 6.0831e-03,
1.7742e-03, 2.1487e-02, 3.3147e-02,
-1.0971e-02, 3.0162e-01, 5.2391e-02,
1.8341e-02, -1.3390e-01, 9.4303e-02,
-1.5685e-01, 9.8434e-02, -1.2502e-03,
3.1370e-01, -2.8879e-02, 2.6313e-03,
1.7548e-02, 6.6741e-03, -1.7681e-03,
5.2062e-02, 6.6914e-02, 7.5256e-03,
2.4966e-02, 2.8081e-01, 2.9815e-02,
2.2375e-02, 1.4257e-03, -7.4702e-02,
1.5372e-02, 3.9587e-02, 4.6909e-02,
-2.2911e-02, -1.4568e-01, -3.8964e-01,
2.2850e-02, -4.2297e-02, 6.5736e-02,
-6.9905e-03, -6.3972e-02, -1.8430e-01,
4.4453e-03, 2.0687e-01, 3.0032e-01,
1.7243e-02, 9.8548e-03, -9.7476e-02,
-7.9682e-04, -2.1199e-01, -4.3461e-02,
-4.2929e-02, -2.8227e-01, 2.8997e-02,
-1.8741e-03, 1.1166e-02, 1.8381e-03,
-5.6725e-16, -1.0368e-15, -1.1480e-15,
-5.5537e-16, -9.9929e-16, -1.1499e-15,
-3.8787e-16, -6.4019e-16, -7.7595e-16,
4.4505e-02, 8.8803e-02, 1.1384e-02,
-3.9434e-02, 1.9319e-01, -1.2016e-02,
-4.6072e-02, 1.1769e-01, 7.4816e-03,
-3.7856e-02, -1.7147e-02, 1.5984e-01,
-2.6459e-02, 1.7469e-01, 1.2584e-01,
1.6387e-02, 1.7370e-01, -1.7350e-01,
-3.0008e-01, 2.1485e-01, -5.4302e-02,
5.7724e-02, 3.2168e-01, -2.5261e-02,
6.9277e-02, 7.5035e-02, 6.3485e-02,
-1.1688e-01, 2.6068e-02, -1.3490e-01,
-1.6085e-01, 1.9409e-01, 1.1434e-01,
-7.3819e-02, -7.7880e-02, 7.3699e-03,
-9.9972e-02, 1.3554e-01, 2.1656e-02,
-8.8303e-02, 5.4435e-01, -4.0582e-02,
-3.4805e-02, -1.5291e-01, -3.6917e-02,
-3.4377e-02, -3.3086e-02, -9.5097e-02,
-7.4538e-03, 2.2545e-01, -2.6380e-02,
1.4440e-02, 1.3205e-01, 1.6164e-01,
9.2164e-02, -8.4307e-02, 7.8922e-02,
1.2519e-01, -6.1809e-01, -1.0895e-01,
6.2744e-02, -4.4951e-02, -3.2548e-02,
-2.5422e-21, -6.3849e-21, -9.5560e-21,
-1.9248e-21, -4.7107e-21, -6.4244e-21,
-1.4638e-21, -3.1947e-21, -3.7663e-21,
-8.6113e-03, -7.0987e-02, 5.8265e-02,
-1.3148e-02, 5.6371e-01, 5.0580e-02,
1.1741e-02, -3.5614e-02, -6.1265e-02,
1.4758e-03, 3.3349e-02, -1.0867e-02,
-4.0234e-02, 1.9894e-01, 1.3972e-01,
-1.9167e-02, -4.1723e-02, -1.9982e-01,
-3.0756e-01, 2.6284e-02, -1.9058e-02,
-7.9349e-04, 1.2644e-01, 2.9567e-02,
-3.9274e-02, 1.1030e-02, -9.4885e-03,
1.3541e-02, 1.7044e-01, 8.9626e-02,
6.6814e-02, 2.6430e-01, 1.7409e-01,
-6.1034e-04, 1.7569e-02, 1.3090e-01,
-4.1941e-03, 8.9599e-02, -3.3684e-02,
-1.1310e-02, -4.3731e-01, 5.7177e-02,
-4.5718e-04, 1.0175e-01, 4.1211e-02,
2.9756e-02, -1.1601e-01, -7.3171e-02,
2.7939e-02, 2.1334e-01, -4.0210e-01,
-8.6847e-03, 8.1829e-02, 4.4225e-02,
-1.1411e-01, -1.7697e-01, -5.8087e-02,
7.9613e-02, -4.2814e-01, -1.0814e-01,
-3.0610e-02, 1.1342e-03, -2.2322e-03,
-1.1254e-10, -1.4207e-10, -1.5402e-10,
-9.9123e-11, -1.2394e-10, -1.3338e-10,
-8.8840e-11, -1.0857e-10, -1.1463e-10,
3.0283e-02, -5.6191e-02, -1.0447e-01,
-1.4578e-02, -2.8745e-01, 1.9089e-01,
-2.7251e-02, 9.8069e-02, -1.4580e-02,
-3.0276e-02, 1.4366e-02, 2.6363e-02,
-8.4962e-02, 7.8998e-02, -4.7717e-02,
-3.2004e-02, -2.1579e-02, 1.1247e-02,
1.3895e-01, -3.3900e-01, 7.7998e-03,
2.4769e-01, -1.8506e-01, -2.3116e-03,
3.1361e-02, -1.1718e-02, -1.8286e-02,
-1.3020e-01, 1.4334e-01, -5.5700e-02,
-3.5386e-02, 1.0992e-01, -8.0235e-02,
-5.8978e-03, 7.7039e-02, -7.4619e-02,
-8.1603e-02, 1.2982e-01, -7.3193e-02,
-6.1469e-02, 1.7131e-01, 4.0255e-01,
-6.4582e-03, -8.2741e-02, -2.2220e-02,
1.6876e-02, -3.2590e-02, 5.5645e-02,
2.5231e-02, 2.9984e-01, -3.6995e-02,
9.3322e-03, 2.0758e-01, -2.1986e-02,
-4.9568e-02, 2.1857e-03, 8.6127e-02,
8.6593e-02, -5.8134e-01, 3.4507e-01,
4.8855e-02, -1.0506e-01, 4.1584e-02,
2.5428e-40, -4.4558e-40, -2.2090e-40,
-2.9727e-40, -4.8454e-40, 3.0397e-40,
1.1696e-40, -3.3028e-40, -2.2959e-40
}
};
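// HDNL1biasL: 8 rows of 8 floats. By its [8][8] shape this is presumably one
// row of 8 channel biases per hidden conv layer of the HDNL1 network (an
// inference from the dimensions; the layout is not documented in the source).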
static __device__ __constant__ const float HDNL1biasL[8][8] =
{
{
-3.1869e-08, -3.8279e-01, -6.3693e-05, -5.9054e-02, 9.3774e-04, -2.9944e-02, -1.1156e-03, -7.5635e-02
}
,
{
-1.7701e-01, -1.3417e-06, -3.0706e-40, -1.9022e-06, -1.2965e-02, -6.6444e-40, 1.4699e-02, 2.6082e-02
}
,
{
-3.7577e-07, 4.4550e-03, -8.1266e-04, 3.2408e-01, -1.1321e-07, -1.8907e-23, -1.9770e-25, -3.2394e-02
}
,
{
-2.1525e-14, -1.4130e-02, -1.9410e-02, -1.8703e-02, -2.9177e-02, -4.0635e-02, 7.8097e-02, -1.1643e-01
}
,
{
-2.6309e-02, -2.2238e-02, 6.8700e-03, -1.7973e-02, -1.0893e-02, -1.1888e-02, -4.9598e-03, -6.3663e-06
}
,
{
-1.2406e-03, -2.4901e-12, -9.7265e-07, 6.3490e-03, 1.3495e-01, -3.8411e-03, -6.6630e-03, -7.3614e-03
}
,
{
-2.7729e-03, -4.8174e-03, -6.3012e-03, 2.0491e-01, -2.0110e-03, -3.0974e-03, 5.1407e-01, -3.5016e-08
}
,
{
0.0324, 0.0140, 0.6750, 0.2661, 0.3646, 0.3591, 0.5597, 0.0816
}
};
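// HDNL1kernelsL10: 4 * 8 = 32 coefficients for the final layer of the HDNL1
// network. The shape suggests a 1x1 projection of the 8 feature maps onto 4
// outputs (e.g. the four sub-pixels of a 2x upscale) -- again an inference
// from the dimensions, not documented in the source.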
static __device__ __constant__ const float HDNL1kernelsL10[4 * 8] =
{
0.0882, 0.0422,
0.3775, 0.4754,
-0.3209, -0.4870,
-0.0384, 0.0530,
0.1034, 0.0173,
0.5011, 0.3900,
0.3621, -0.1645,
-0.1304, 0.0013,
0.2230, 0.3026,
0.1618, -0.4514,
-0.2097, 0.1894,
-0.0326, 0.1434,
0.2421, 0.3363,
-0.0938, 0.3156,
0.1137, -0.2165,
0.2273, -0.1284
};
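/* Hedged indexing sketch (an assumption, not part of the original code): if
   HDNL1kernelsL10 is laid out output-major, the coefficient mapping feature
   map c (0..7) onto sub-pixel output o (0..3) sits at index o * 8 + c. The
   helper below is hypothetical and only illustrates that assumed layout. */
static __device__ __forceinline__ float HDNL1L10Weight(int o, int c)
{
    // hypothetical accessor; the o * 8 + c layout is an assumption
    return HDNL1kernelsL10[o * 8 + c];
}
// HDNL2kernelsL1: 9 * 8 = 72 floats, i.e. eight 3x3 kernels -- consistent
// with a first conv layer taking 1 input channel to 8 feature maps (inferred
// from the dimensions).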
static __device__ __constant__ const float HDNL2kernelsL1[9 * 8] =
{
-2.0676e-02, 6.7641e-03, 2.8287e-01,
2.5576e-01, 1.9765e-01, -2.4700e-01,
3.5056e-01, 2.9306e-01, -2.2245e-01,
8.4706e-02, -2.9455e-01, -5.5831e-02,
-8.4635e-02, -9.6835e-02, 3.1208e-01,
1.7690e-01, 2.7624e-02, 5.1954e-02,
-5.3869e-01, 7.2934e-02, -1.7662e-03,
-3.1402e-02, 3.1700e-01, 1.4965e-01,
3.8569e-02, 5.5025e-03, -6.6555e-03,
-4.2049e-38, -4.1971e-38, -4.1488e-38,
-4.2855e-38, -4.2871e-38, -4.2363e-38,
-4.1861e-38, -4.1974e-38, -4.1677e-38,
1.8451e-01, -5.4584e-02, 1.4494e-01,
1.3433e-01, 1.0073e-01, 2.6371e-01,
6.1261e-02, 2.2116e-01, 2.0074e-01,
5.9669e-02, -3.9168e-02, 2.1674e-01,
-2.9132e-01, 3.0285e-03, 1.2625e-01,
-4.3415e-02, 1.8663e-01, -1.6554e-01,
1.0102e-01, 6.3466e-02, 1.5225e-01,
2.1692e-01, 1.9860e-01, -7.0456e-02,
-1.6406e-03, -2.7834e-01, -3.5449e-01,
-3.0140e-01, -4.2348e-01, -5.8263e-01,
2.3140e-01, -2.6843e-01, -1.1069e-01,
-9.1484e-02, 1.1486e-02, 5.6396e-02
};
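// HDNL2biasL1: the 8 biases matching the eight first-layer kernels above.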
static __device__ __constant__ const float HDNL2biasL1[8] =
{
-9.0964e-02, 2.1136e-01, -1.2011e-02, -4.5657e-38, -1.4443e-01, 1.8968e-01, -2.9027e-02, 1.6199e-01
};
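// HDNL2kernelsL: 8 blocks of 9 * 8 * 8 floats -- presumably eight hidden
// layers, each holding 3x3 kernels over 8 input and 8 output channels
// (layout inferred from the dimensions, not documented in the source).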
static __device__ __constant__ const float HDNL2kernelsL[8][9 * 8 * 8] =
{
{
4.4561e-02, 4.3527e-01, -8.9737e-02,
-4.9011e-03, 1.4879e-01, -8.2210e-02,
-1.7593e-02, 4.9294e-02, 1.8058e-01,
-3.3827e-02, -7.9055e-02, 2.6982e-01,
-5.2485e-02, -4.2046e-01, -5.6838e-02,
1.0919e-01, -7.3141e-02, 9.4797e-02,
6.2764e-02, 2.5475e-01, 1.3705e-01,
2.0997e-01, 7.3360e-01, 2.0801e-01,
-1.1500e-01, 3.1245e-01, 6.7457e-01,
-5.1481e-39, -5.1520e-39, -4.9367e-39,
-5.1383e-39, -5.1642e-39, -4.9479e-39,
-5.1323e-39, -5.1859e-39, -4.9547e-39,
1.3849e-01, 1.1564e-01, -1.8175e-01,
-5.5355e-03, -1.5117e-01, -2.4654e-01,
8.1590e-03, -1.1681e-01, 3.4700e-05,
-2.5950e-01, -1.4182e-01, 3.1814e-01,
1.7662e-01, 1.8420e-01, -1.5181e-01,
7.6233e-02, -7.8372e-02, -3.1968e-01,
-4.5770e-01, 4.1562e-02, 1.3721e-01,
-5.8444e-02, 3.3148e-02, -2.3370e-01,
1.5374e-01, -1.1162e-01, -7.4099e-03,
-1.5716e-01, -1.8356e-01, 2.1114e-02,
-3.2233e-01, 2.1064e-02, 2.7019e-01,
-1.3702e-01, 2.6969e-01, 2.1033e-01,
8.9027e-02, -7.9969e-02, 1.0096e-01,
6.6773e-02, 3.9558e-02, -7.4944e-02,
-5.9789e-02, 1.2265e-01, 3.3873e-02,
-9.7157e-03, 9.2906e-02, 6.0300e-02,
-2.2104e-03, 6.8198e-02, -1.2931e-01,
8.9288e-02, -1.2554e-01, -4.3270e-02,
1.0660e-01, 1.1609e-02, -1.2415e-01,
2.6372e-02, -3.6311e-02, 1.5625e-01,
-7.9595e-02, -3.3662e-01, -4.0760e-01,
-2.9566e-39, -2.8760e-39, -2.8816e-39,
-2.9566e-39, -2.8964e-39, -2.9115e-39,
-2.9566e-39, -2.9179e-39, -2.9130e-39,
7.9255e-02, 9.4548e-02, 8.8155e-02,
-2.8163e-02, 1.2428e-01, -6.4973e-03,
7.7875e-02, 7.4765e-02, -5.2405e-02,
-1.4886e-02, -7.1499e-02, -7.0719e-02,
9.7562e-02, 9.0948e-02, -5.6588e-02,
-1.2872e-02, -6.6390e-02, -6.4147e-02,
9.8262e-02, -2.4215e-01, -1.7051e-01,
1.8096e-01, 1.8106e-01, 1.3108e-01,
2.0649e-01, 1.2242e-01, 3.7225e-02,
-2.5125e-01, -1.0073e-01, 4.5330e-01,
1.8588e-01, -2.6809e-01, -1.5709e-01,
4.7668e-01, -2.4208e-01, -6.6012e-01,
1.3561e-01, 5.4109e-02, 6.1899e-02,
-1.9605e-02, 1.1349e-01, 3.5781e-02,
3.5513e-03, 3.1212e-02, -6.0399e-02,
5.9258e-02, -1.8175e-02, 7.3714e-02,
2.0052e-02, 4.3245e-02, -5.0879e-03,
-1.1082e-02, -1.0753e-01, -1.7896e-03,
2.9139e-02, 2.2747e-01, -6.4075e-02,
7.3097e-02, 1.5703e-01, -5.3815e-01,
1.0620e-01, -1.1386e-01, 1.7103e-01,
-3.8728e-39, -3.8299e-39, -3.8320e-39,
-3.9065e-39, -3.8445e-39, -3.8135e-39,
-3.8838e-39, -3.8114e-39, -3.8255e-39,
2.3253e-02, 6.9893e-02, 1.4774e-01,
9.6087e-02, 2.3102e-03, -3.4449e-02,
2.6819e-02, 1.0254e-01, -2.8200e-02,
3.9553e-02, 4.7191e-05, -5.5558e-02,
4.1641e-02, 5.8706e-02, -1.0337e-01,
1.1291e-01, 5.9622e-02, 7.0677e-02,
-2.5162e-01, 7.6659e-02, 1.7245e-01,
-5.8522e-02, 1.4365e-01, 2.1189e-01,
-2.8897e-02, -5.7365e-02, 1.4232e-01,
1.7854e-02, 1.7404e-03, -8.7356e-03,
-6.0777e-02, -6.2687e-02, -1.1500e-02,
-1.6468e-01, -2.5058e-01, -1.2798e-01,
2.3193e-02, 1.7209e-01, 1.6687e-01,
-3.4483e-02, -1.6846e-02, 2.5930e-02,
1.4410e-01, 4.2932e-02, -5.0149e-03,
4.7269e-02, 1.1276e-01, -9.2701e-03,
1.5323e-02, 1.3552e-02, 9.0256e-02,
-8.9393e-03, 7.0903e-02, -6.9379e-02,
1.8645e-01, 1.0543e-01, -1.5590e-01,
2.1056e-01, 1.1051e-01, -1.5514e-01,
-7.0484e-02, -1.5153e-01, -5.0873e-01,
3.2730e-39, 3.2358e-39, 3.1222e-39,
3.2642e-39, 3.2358e-39, 3.0921e-39,
3.2730e-39, 3.2358e-39, 3.0899e-39,
1.2225e-02, 1.2386e-01, 6.7712e-02,
3.1263e-02, 1.3617e-01, 1.5352e-01,
2.3405e-02, 8.5466e-02, 8.7303e-02,
-2.0372e-02, 8.3465e-02, -7.4233e-02,
1.2269e-01, 8.4046e-02, -3.6869e-02,
1.0242e-01, 7.3218e-02, -1.1496e-01,
-1.4539e-01, -2.3923e-01, -2.2818e-01,
-3.2368e-02, -7.4360e-02, 2.3493e-02,
1.7004e-01, 6.2924e-02, 8.9327e-02,
-1.1449e-01, -1.4973e-03, -7.0451e-03,
-9.3205e-02, -1.0312e-01, 4.6503e-02,
-2.2148e-01, -1.8111e-01, -1.1992e-01,
9.8140e-02, 9.9823e-02, -2.0282e-02,
-8.1973e-02, 1.4255e-01, -5.2392e-02,
8.0350e-03, -4.8299e-02, -7.7908e-02,
4.2383e-02, 3.0707e-02, 2.8560e-02,
1.0437e-01, 6.1290e-02, -9.7796e-02,
-1.7125e-02, -1.3572e-01, -1.5345e-01,
-1.3292e-01, 2.9477e-02, 6.8032e-02,
1.5741e-01, 4.0258e-01, 2.5838e-01,
1.3948e-01, 3.5713e-01, -3.9825e-01,
-1.9224e-39, -2.4076e-39, -2.4529e-39,
-1.9181e-39, -1.9894e-39, -4.0240e-39,
-1.9335e-39, -2.3920e-39, -4.0147e-39,
-2.1714e-02, -3.5299e-02, -7.5803e-03,
-2.4087e-02, 7.5265e-02, 7.6697e-02,
4.5309e-02, 8.9529e-02, 7.6510e-03,
1.0813e-02, 3.1294e-02, -2.5907e-02,
1.1962e-02, -6.8664e-03, -1.4084e-01,
7.7013e-02, -1.2305e-01, -6.7800e-02,
-9.7392e-02, 4.4082e-02, 1.4473e-01,
4.9436e-02, 2.8859e-01, 2.8252e-01,
-3.5828e-02, -7.5616e-02, 2.4875e-01,
-6.7684e-02, 1.1290e-01, 4.2827e-02,
-1.0860e-01, 1.2952e-01, 5.9784e-01,
-3.5402e-01, -3.9558e-02, -6.0775e-01,
-1.2854e-02, 1.5240e-01, 1.4115e-01,
-2.8134e-02, -1.2939e-02, -2.6203e-02,
1.1300e-01, 1.4481e-01, -5.1454e-02,
1.2688e-01, 2.8536e-02, 9.4877e-02,
9.6033e-02, -1.3901e-02, 6.0035e-02,
-1.1249e-01, 4.3971e-02, -1.0918e-01,
8.2500e-02, 2.1413e-01, 3.9015e-02,
1.8361e-01, 2.5271e-01, -2.2794e-01,
-8.1195e-02, -1.2269e-01, -2.6097e-01,
7.6827e-39, 7.7882e-39, 7.6893e-39,
7.7006e-39, 7.7857e-39, 7.7384e-39,
7.6985e-39, 7.7712e-39, 7.7399e-39,
1.4458e-02, 1.0801e-01, 1.5906e-01,
-1.4676e-02, 1.3699e-01, 9.2460e-02,
-3.6479e-02, 1.4529e-01, -2.8681e-02,
-3.3251e-02, -7.3096e-02, -1.4330e-01,
5.7009e-02, -3.1905e-02, -1.2035e-01,
1.1838e-01, 5.7011e-02, 2.0800e-02,
-1.1567e-02, -2.2125e-01, -9.3953e-02,
-7.5378e-02, -1.2069e-01, 1.3217e-01,
-7.7357e-02, -1.3171e-01, 1.2776e-01,
-1.1397e-01, -3.5183e-02, 2.2994e-02,
-6.5101e-02, -1.5019e-01, -2.7451e-02,
-2.4260e-01, -1.3543e-01, -1.9889e-02,
-1.9798e-39, -3.5282e-40, -1.9216e-39,
-1.9140e-39, -1.9370e-39, -1.9943e-39,
-1.8623e-39, -1.8665e-39, -1.9320e-39,
-4.8850e-39, -5.0283e-39, -4.9987e-39,
-5.0868e-39, -5.0814e-39, -5.0779e-39,
-5.2489e-39, -5.1086e-39, -5.1234e-39,
-2.9120e-39, -3.0278e-39, -2.9633e-39,
1.3186e-39, 6.0555e-39, 6.0419e-39,
-5.5922e-39, -8.5992e-40, -2.8529e-39,
-3.4668e-39, -3.5127e-39, -3.4668e-39,
-3.2831e-39, -3.4668e-39, -3.6734e-39,
-3.2142e-39, -3.2831e-39, -3.5816e-39,
1.3445e-39, 1.3621e-39, 1.3375e-39,
1.4539e-39, -2.2695e-40, 1.4522e-39,
1.3563e-39, 1.3339e-39, 1.3001e-39,
-4.4670e-39, -4.4026e-39, -4.3159e-39,
-4.5047e-39, -4.3505e-39, -2.7259e-39,
-4.5265e-39, -4.4721e-39, -4.4990e-39,
-1.9864e-39, -4.1379e-39, -3.7189e-39,
5.2465e-39, 2.5220e-39, 1.5639e-39,
-3.9760e-39, -5.7033e-39, -4.0978e-39,
-6.3745e-40, -4.7511e-39, 2.3456e-39,
-1.5164e-39, 5.0431e-39, 5.1197e-39,
8.7052e-40, 1.4947e-39, -1.1546e-39,
5.3140e-02, 1.0281e-01, 1.4767e-01,
-6.1530e-02, -9.4166e-02, 4.8671e-02,
5.6787e-03, -1.4551e-01, 1.5614e-02,
-3.4826e-02, -5.1148e-02, 9.7079e-02,
-1.3603e-02, -1.2249e-01, -1.9330e-02,
-6.8184e-02, -1.4344e-01, -9.4023e-03,
-7.4629e-02, 3.9634e-02, 1.3445e-01,
4.2153e-02, 7.1129e-01, 2.8703e-02,
7.8247e-02, 7.2210e-01, -6.6198e-01,
-6.1010e-39, -6.2892e-39, -6.4008e-39,
-6.0825e-39, -6.3221e-39, -6.3883e-39,
-1.4962e-39, -1.1702e-39, -1.2143e-39,
5.5512e-02, -2.1522e-02, 1.0866e-01,
-9.2812e-02, -3.5119e-02, 1.1396e-01,
-1.3922e-01, 6.7287e-02, -5.5626e-02,
-2.0492e-01, 8.1441e-02, -1.3513e-01,
4.7447e-02, 2.0081e-01, -3.1249e-01,
-1.8546e-02, 2.0680e-01, 7.3979e-02,
8.8928e-02, -4.3606e-01, -8.4823e-02,
-5.6133e-02, 3.5132e-01, 1.8633e-01,
-4.3855e-03, 5.4869e-02, 1.1658e-01,
1.7423e-01, -5.3107e-02, 2.2925e-02,
-1.7622e-01, 4.4453e-02, 2.8131e-02,
2.6863e-01, -2.9085e-01, -1.5098e-01
}
,
{
-2.4230e-40, 5.4425e-39, 3.4517e-39,
-1.9803e-39, -1.5207e-39, -3.5630e-39,
-4.9409e-39, -2.9280e-39, 7.7966e-40,
2.4867e-39, -2.1848e-39, 3.2524e-39,
-6.2860e-39, 4.0411e-39, -3.6956e-39,
-3.3384e-39, -1.0908e-39, 5.4261e-39,
-3.6691e-40, 9.4949e-40, -1.7279e-39,
-1.0644e-39, -2.1371e-39, -2.5125e-39,
2.9368e-39, -5.3820e-39, -3.9771e-40,
-1.4703e-39, -3.6960e-39, -4.4161e-39,
8.2800e-40, -4.9175e-39, 3.1868e-39,
5.5703e-39, -3.0263e-39, -1.6991e-39,
5.2691e-39, 4.8127e-39, 4.1346e-39,
-1.3013e-39, -1.7101e-39, -3.5467e-39,
1.1496e-39, 2.0938e-39, -4.2970e-39,
-5.5314e-39, 6.4852e-40, -5.0870e-39,
3.9377e-39, -4.1683e-39, -3.5404e-40,
-3.6188e-39, 5.4657e-39, 2.1279e-39,
3.4090e-40, 2.4425e-40, 9.3423e-41,
-2.3450e-39, 3.1518e-40, 4.3061e-40,
-2.6175e-39, -2.4696e-39, -2.3755e-39,
2.2764e-39, -4.4934e-39, 8.5722e-40,
5.1798e-39, 2.7072e-39, 5.3750e-39,
5.4335e-40, 3.8556e-39, -3.4799e-39,
-4.8963e-39, -1.1413e-39, -5.3918e-40,
6.1843e-39, -1.8521e-39, -1.3450e-39,
-2.0906e-39, -3.2544e-39, -2.8205e-39,
5.3550e-39, -3.0202e-39, -3.4181e-39,
-3.0043e-39, -3.2900e-39, -3.2915e-39,
6.1849e-39, -3.3421e-39, -3.3995e-39,
-4.8657e-39, -4.7034e-39, -4.7467e-39,
-4.6555e-39, -4.6045e-39, -4.6954e-39,
-4.8886e-39, -4.7333e-39, -4.7805e-39,
-2.0900e-39, -1.9429e-39, -2.0572e-39,
-2.0270e-39, -1.9074e-39, -1.9275e-39,
-2.1243e-39, -2.1134e-39, -2.1539e-39,
-4.4175e-39, -4.6412e-39, -4.6582e-39,
-4.6364e-39, -4.8757e-39, -4.6795e-39,
-4.4571e-39, -4.5038e-39, -4.4570e-39,
-3.2662e-39, -3.1163e-39, -3.2050e-39,
-3.2098e-39, -3.0887e-39, -3.1635e-39,
-3.3183e-39, -3.1411e-39, -3.2824e-39,
8.6839e-40, 5.7318e-39, 1.8373e-40,
4.6732e-39, -4.5549e-41, 1.2817e-39,
3.7642e-41, -6.2591e-39, -5.0492e-39,
5.0057e-39, 6.0612e-39, 2.0220e-39,
3.7436e-39, 4.8326e-39, 3.1353e-39,
3.5289e-39, 4.7177e-39, 6.2666e-39,
-1.4963e-01, -8.0360e-02, -7.9054e-02,
-1.3731e-01, 5.0766e-02, 6.9673e-02,
3.2213e-02, 3.3250e-02, 1.3170e-01,
-2.9718e-02, -2.6931e-02, 1.5768e-02,
5.9232e-02, 7.8471e-02, 9.9465e-02,
2.4872e-02, -4.4226e-02, 3.2357e-02,
-6.0139e-02, -2.2756e-02, -5.5412e-02,
4.5363e-02, 1.6393e-01, 3.7428e-02,
5.2497e-02, 9.5435e-02, 9.7155e-02,
8.2849e-02, 5.9711e-02, 1.4352e-01,
1.1756e-02, 1.5440e-02, 1.3039e-01,
4.3324e-03, 5.9119e-02, 1.1129e-01,
-3.9591e-03, 5.8617e-02, -1.3843e-02,
-2.9949e-02, 3.4877e-02, 5.0679e-03,
3.7278e-02, -2.5221e-02, 1.2191e-01,
1.5626e-01, 8.9797e-02, -1.5458e-02,
1.5607e-01, 1.4561e-02, 1.1720e-01,
-1.6112e-02, 7.7908e-02, -6.1322e-02,
3.8589e-39, 3.9262e-39, 3.8641e-39,
3.9450e-39, 3.8805e-39, 3.9383e-39,
3.8384e-39, 3.8027e-39, 3.7700e-39,
6.2294e-02, -5.6804e-03, -4.7293e-01,
1.3161e-01, 3.1187e-01, -1.8013e-01,
4.9908e-02, 9.8583e-02, 3.8863e-02,
-1.7400e-39, 3.5779e-39, 5.2800e-39,
-1.6845e-39, 4.7140e-39, 2.4244e-39,
-1.3654e-39, 2.4123e-40, -1.5360e-39,
-1.0409e-39, 1.8590e-39, -5.2161e-41,
-8.5110e-40, -1.7210e-39, -4.6624e-39,
5.0754e-40, -2.6248e-39, -5.4801e-39,
-4.9486e-39, 2.8984e-39, 4.9357e-39,
-1.4077e-39, 3.8778e-39, 5.8202e-39,
-4.1095e-39, 6.8891e-40, 5.6565e-39,
3.8021e-39, -5.4740e-41, 2.1795e-39,
-2.4185e-39, -5.8101e-39, 1.5651e-39,
-4.9775e-39, 6.0152e-39, -5.2337e-39,
-4.4350e-39, -3.8239e-39, 3.1624e-40,
-4.3665e-39, -3.0919e-39, -4.7675e-39,
-2.3335e-39, 1.8270e-39, -5.5077e-39,
5.5906e-39, 6.7732e-41, 3.7359e-39,
-5.1412e-40, -2.3239e-39, 5.1937e-39,
-4.4951e-39, -3.4928e-40, -5.0589e-39,
4.9149e-39, 1.1372e-39, 6.6368e-40,
-1.8870e-40, -5.9117e-40, -1.3973e-39,
-2.3555e-39, -1.0637e-39, 3.1692e-39,
-4.8054e-39, 4.8090e-40, 2.0873e-39,
3.8301e-39, -3.8642e-39, 4.8187e-39,
-1.6563e-39, 8.9890e-40, -3.5162e-39,
-2.3010e-01, -7.4445e-02, -1.0006e-01,
-2.4543e-01, -8.5750e-02, 1.4859e-01,
-1.3783e-01, 1.2709e-01, 2.5012e-01,
1.0310e-01, -2.3520e-02, -8.1277e-02,
-2.9267e-02, 1.0686e-01, 4.6287e-02,
-1.2342e-02, -1.7104e-02, 8.4357e-02,
-1.8492e-02, -2.0711e-02, -3.5242e-02,
7.6163e-02, 6.0853e-02, 9.4248e-02,
6.2008e-02, 1.1373e-02, 2.6609e-02,
-7.8135e-02, 1.0672e-01, -5.8380e-02,
7.1618e-02, 2.7966e-04, 1.1835e-01,
1.1306e-01, -7.8578e-03, 5.1743e-03,
-1.2123e-01, 4.9640e-02, 7.3827e-02,
-1.0377e-01, -3.7377e-02, -3.6536e-02,
5.7489e-02, -4.6279e-04, 9.0068e-02,
4.0784e-05, -3.3328e-02, 5.1191e-02,
9.6538e-02, 7.1779e-02, 1.2121e-01,
1.1598e-01, -5.9055e-02, 8.2671e-02,
-1.7292e-39, -1.7848e-39, -1.7308e-39,
-3.2817e-39, -1.7274e-39, -3.3601e-39,
-1.7252e-39, -3.4067e-39, -1.7783e-39,
-7.4053e-02, -4.2785e-01, -4.7597e-01,
4.6309e-01, 7.6018e-02, -3.5885e-01,
3.0428e-01, 8.7449e-02, 9.7880e-02,
-3.4191e-02, 1.1834e-01, -4.3273e-02,
-6.0782e-01, 9.2387e-01, -1.3972e-01,
3.0665e-01, 4.7445e-01, 4.8683e-02,
-1.8865e-02, 9.9509e-02, -4.9881e-02,
2.1640e-02, -2.0941e-01, -1.4779e-01,
1.7808e-01, -1.2572e-01, -9.6756e-02,
-1.0143e-01, 8.3153e-02, -1.0478e-01,
1.6201e-01, 2.0740e-01, -1.2653e-01,
8.1654e-02, -7.6224e-02, -8.9864e-02,
4.5383e-02, -3.6893e-02, -1.0096e-01,
2.0389e-01, 2.2557e-01, -1.9685e-01,
-9.5198e-02, 2.2877e-01, 2.1135e-02,
-1.0919e-01, -1.7563e-01, -3.5255e-01,
-1.3447e-01, 3.3709e-01, -1.9043e-01,
-2.1422e-01, -2.8848e-01, -5.3921e-02,
5.5351e-02, -5.0579e-02, -1.6168e-01,
2.5282e-01, 1.9715e-01, -2.4035e-01,
-3.0800e-02, 1.9329e-01, -1.0893e-01,
-3.4416e-39, -1.8080e-39, -1.6625e-39,
-1.6612e-39, -1.7397e-39, -1.5953e-39,
5.3047e-39, 5.4221e-39, -1.1665e-39,
2.1838e-02, -7.0635e-02, 3.6095e-01,
5.1096e-01, 6.3838e-01, 5.0716e-01,
1.1642e-01, 1.8546e-01, 1.5989e-01,
1.0799e-01, 2.8380e-01, 1.4910e-01,
-2.4305e-01, 2.3084e-01, -9.9982e-02,
-4.6839e-01, 6.0376e-01, -1.2748e-02,
8.7608e-02, 9.8828e-02, 2.1469e-02,
-3.5384e-03, -1.5689e-01, -1.1411e-01,
2.0728e-02, 5.6814e-02, -1.1090e-02,
-3.9301e-02, -9.4325e-02, -6.2119e-02,
1.2842e-01, 9.7466e-02, -2.7502e-02,
1.6560e-01, 1.5058e-01, 2.2821e-02,
-8.1287e-02, -6.3940e-03, 3.2162e-02,
9.4116e-02, -6.2567e-02, -1.2704e-01,
5.4654e-02, 1.4885e-02, 3.8166e-03,
1.9830e-01, -2.5419e-01, -6.7067e-02,
3.2303e-01, 1.6037e-01, -3.0200e-02,
1.3011e-01, 7.5455e-02, -1.2726e-02,
-1.9198e-01, -1.5419e-01, -7.5420e-02,
1.6070e-01, -6.1031e-02, -2.0179e-01,
-1.5829e-02, 1.9918e-01, 1.0960e-01,
-5.5215e-39, -5.8659e-39, -5.5573e-39,
-6.2394e-39, -6.0172e-39, -6.0159e-39,
-4.0308e-39, -4.1217e-39, -4.1372e-39,
1.6143e-01, 1.7271e-01, 4.3534e-01,
-2.4312e-01, 4.0146e-01, 4.4693e-01,
1.5442e-01, 3.9885e-01, -1.4357e-01,
-6.0236e-02, -1.2324e-01, 6.1197e-02,
-2.5842e-02, -1.0266e-02, 1.5670e-03,
2.9103e-02, 2.9966e-02, 1.1286e-01,
3.4528e-02, 1.3039e-01, 9.2736e-02,
3.5193e-02, 5.6583e-02, 5.9465e-02,
1.2846e-01, 9.3387e-02, 9.2131e-02,
1.4974e-03, 1.0196e-01, 6.7632e-02,
8.9809e-02, 5.7568e-02, -6.0621e-02,
-2.7582e-03, 3.1935e-02, 3.1299e-02,
1.3595e-01, 4.9498e-02, 1.2535e-01,
-3.9396e-02, 4.8859e-02, 4.1389e-02,
3.7026e-02, 1.3667e-01, 7.5657e-03,
-5.3476e-02, 1.9677e-02, 9.5214e-02,
1.3136e-02, 7.5560e-02, 6.2428e-03,
-5.2378e-02, -1.8704e-02, 1.0657e-01,
-4.2938e-02, -5.0199e-02, 1.4357e-01,
-5.7002e-02, 1.4158e-01, 4.9442e-02,
-6.8383e-02, 1.1316e-01, 5.2071e-02,
1.5031e-40, 2.1250e-40, 1.8673e-40,
1.5681e-40, 1.3104e-40, 1.6173e-40,
2.1560e-40, 1.8582e-40, 1.7747e-40,
8.4848e-02, -1.9845e-01, -5.1844e-01,
3.0959e-01, 3.6682e-01, 3.1208e-02,
1.9871e-01, 2.8318e-01, 1.6066e-01
}
,
{
-2.7283e-39, -4.9031e-39, -2.1039e-39,
-1.0327e-39, -5.1679e-39, -4.3300e-39,
-5.2613e-39, -3.1707e-39, -6.0916e-39,
1.5840e-39, 1.6709e-39, 1.6120e-39,
1.6716e-39, 1.7418e-39, 1.6624e-39,
1.5922e-39, 1.7383e-39, 1.5668e-39,
1.1389e-01, -4.5774e-02, 6.1423e-02,
1.3858e-01, 2.3102e-02, -6.5079e-02,
1.3269e-01, 3.2387e-02, 7.6966e-02,
-2.1531e-39, -1.6063e-39, -3.2070e-39,
-2.8531e-39, 4.6956e-39, 1.4038e-39,
2.0509e-39, -4.4924e-39, -5.3658e-39,
1.1524e-01, -5.0115e-02, 9.4187e-02,
4.2477e-02, 1.4197e-01, 2.4986e-02,
-2.8688e-02, 9.2289e-02, 4.1965e-02,
-2.1691e-01, -6.6916e-04, -1.3026e-01,
-1.9143e-01, 1.2211e-01, 1.2562e-01,
-1.2273e-01, 7.1045e-02, 1.2396e-01,
-8.0861e-02, -4.4301e-03, 6.3144e-03,
3.0338e-02, -8.6463e-03, 5.5084e-02,
-1.8370e-01, -5.0287e-02, -7.2194e-02,
7.4570e-02, 5.4483e-02, -1.2639e-02,
1.2481e-01, 1.4683e-01, -4.7581e-02,
1.6748e-01, -3.1374e-02, -1.7271e-02,
1.9801e-39, -3.3469e-39, -4.7012e-39,
-2.9869e-39, -3.2752e-39, -2.2142e-39,
-4.2927e-39, -1.9635e-39, -8.7517e-40,
2.7286e-39, 2.7755e-39, 2.7501e-39,
2.7114e-39, 2.7711e-39, 2.6858e-39,
2.5562e-39, 2.6523e-39, 2.5846e-39,
1.4015e-01, 1.0486e-01, 1.2320e-01,
4.6545e-02, 1.2068e-01, 9.2531e-02,
1.0717e-01, 3.8738e-02, 1.0181e-01,
-7.4503e-40, -1.1490e-39, 6.1230e-41,
2.4896e-39, 5.3740e-39, -1.4060e-39,
1.9095e-39, -7.1020e-40, 3.5820e-39,
-1.4348e-02, 6.4128e-02, 6.1082e-02,
-1.1112e-02, 8.5993e-02, 2.4835e-02,
1.2794e-01, -9.1072e-02, -1.3487e-02,
-5.8057e-02, 1.3080e-01, 1.0895e-01,
-1.6436e-01, 9.8593e-03, 1.5586e-02,
-1.5336e-01, 3.6391e-02, 1.4539e-01,
-4.6112e-02, 3.0102e-02, 6.2460e-02,
-2.5510e-02, 2.0437e-02, -5.6816e-02,
-1.0308e-01, -1.5284e-01, -7.1036e-02,
5.5290e-02, -6.6632e-02, 4.2268e-02,
-2.7665e-02, 9.3415e-02, 5.1026e-02,
1.5652e-01, 1.0835e-01, 9.6131e-02,
-4.2583e-39, -3.4889e-39, -5.7522e-39,
4.2701e-40, 2.8095e-39, -3.5579e-39,
2.2286e-39, 4.9865e-39, 4.0469e-39,
-6.4320e-40, -3.3384e-39, -5.9025e-39,
-7.9075e-40, -3.0577e-39, -6.0007e-39,
-8.9627e-40, -2.8374e-39, -5.8866e-39,
6.3645e-03, -5.3080e-03, -5.1759e-02,
1.0665e-01, -6.3126e-02, 5.0918e-02,
7.2193e-02, -6.8836e-02, -6.5657e-02,
2.8519e-39, -5.0955e-39, -9.6085e-40,
-3.3563e-39, -5.6038e-39, -1.6256e-39,
2.6872e-39, 1.4728e-39, -1.9908e-39,
-1.5254e-02, 9.8323e-02, 4.5504e-02,
1.3855e-01, 6.9300e-02, 1.9135e-01,
-5.2321e-02, -6.0227e-03, -1.1734e-04,
-1.4457e-01, 9.2761e-02, 4.5219e-02,
-3.0361e-01, 3.4673e-01, -2.3110e-01,
2.1017e-01, 2.4983e-01, 3.1659e-01,
-6.0569e-02, -5.4348e-02, -7.6719e-02,
-6.5060e-02, 2.8902e-01, 8.0732e-02,
-3.3425e-01, -3.1361e-01, -2.7183e-01,
2.8035e-02, -5.8134e-02, -4.3880e-02,
-1.6375e-02, 9.8195e-02, -7.4011e-02,
-5.9523e-02, 1.0234e-01, -5.3357e-02,
2.3364e-39, -2.5324e-39, -4.8333e-40,
2.2903e-41, -3.3061e-39, -2.5779e-39,
-1.8164e-39, -4.9236e-39, -4.9272e-39,
-1.2809e-39, -1.1698e-39, -1.2564e-39,
-1.3111e-39, -1.1778e-39, -1.2543e-39,
-1.4772e-39, -1.4021e-39, -1.4721e-39,
8.8919e-02, -3.4541e-03, -4.9619e-02,
1.0997e-01, 1.0257e-01, 6.9950e-02,
9.2624e-02, 3.2712e-02, 8.7916e-02,
-5.0242e-39, -6.1320e-39, 8.7891e-40,
-4.9951e-39, 2.3873e-39, -2.7823e-39,
-3.6739e-39, -1.8903e-39, 5.2150e-39,
9.6288e-02, 9.7568e-03, -5.8178e-02,
2.3313e-02, 1.1725e-01, 1.0291e-01,
-1.0111e-01, 8.3706e-02, 9.6575e-03,
-8.2531e-02, 7.0089e-02, 1.0821e-01,
-1.1016e-01, 1.8977e-01, 2.5576e-01,
-1.0221e-01, 5.9236e-02, 6.1678e-02,
2.6234e-02, 9.6868e-02, 9.2432e-02,
4.9881e-02, 5.9121e-02, -1.0477e-02,
-1.4693e-01, -1.0030e-01, -1.0608e-01,
1.1936e-01, -2.2301e-02, 1.1363e-01,
1.3981e-01, 6.7734e-02, -8.2775e-02,
1.0404e-01, -7.7360e-03, 4.2523e-02,
-2.6052e-39, 5.7201e-39, -5.6049e-39,
-3.6314e-39, -5.9232e-39, -3.6970e-39,
3.4360e-39, -5.6848e-39, -3.8308e-39,
4.6279e-39, 5.8135e-39, 2.0652e-39,
3.9864e-39, 4.4000e-39, 5.5163e-39,
2.9644e-39, 2.7537e-39, 3.6593e-39,
4.7872e-02, -2.5857e-02, 4.8810e-02,
1.0389e-01, -1.0782e-01, 4.1365e-02,
9.5778e-02, -5.2341e-02, 4.5947e-02,
-8.2652e-40, -5.7602e-39, 4.6187e-39,
-2.8365e-39, 1.4981e-39, 6.2504e-39,
-4.8330e-39, 4.0283e-39, 4.9792e-39,
-1.0893e-03, -8.2708e-02, -1.7925e-01,
8.3461e-02, 3.1339e-02, 8.8096e-02,
7.3139e-02, -1.2212e-01, 1.0489e-02,
-2.4187e-01, -3.8397e-01, 1.3730e-01,
1.9217e-01, 1.4101e-01, 4.9795e-01,
-1.1441e-01, 3.3343e-01, 7.9194e-02,
1.4556e-01, -5.1060e-01, 2.1556e-01,
3.5719e-01, 2.7282e-01, -1.9015e-01,
-1.0941e-01, 2.7634e-02, 1.1833e-01,
-9.3316e-02, -4.1307e-03, 7.8613e-02,
-2.1526e-02, -6.7141e-02, 2.5513e-02,
-3.3942e-02, -8.6282e-02, 3.0446e-02,
-4.5124e-39, -2.7154e-39, 4.9467e-39,
-4.2299e-39, -5.9485e-39, -2.9606e-39,
-4.7642e-39, -4.7981e-39, -4.0169e-39,
-3.8238e-39, 5.7381e-39, 4.0097e-39,
1.9550e-39, 4.5523e-39, 3.1206e-39,
6.0200e-39, 3.0406e-39, 2.0498e-39,
-3.2474e-01, 1.1052e-02, 4.7197e-02,
-1.4658e-01, 1.6728e-01, 5.2190e-02,
4.3174e-02, 4.5864e-02, 5.4472e-02,
2.6403e-39, 2.7421e-39, -4.3011e-39,
-3.6258e-39, -1.3708e-39, 3.6147e-39,
-1.9471e-39, 4.5896e-39, 4.5992e-39,
-9.9986e-02, 7.0727e-02, 8.5023e-02,
2.2501e-02, 1.4343e-01, 1.1878e-01,
2.8126e-02, 7.3239e-02, 1.0468e-02,
4.5032e-01, 4.4730e-01, 1.3446e-01,
-1.3374e-01, 8.8554e-02, 3.5610e-01,
3.0584e-01, 2.3536e-01, 1.6161e-01,
-5.1485e-01, 1.2372e-01, 5.4379e-02,
-2.9665e-01, -3.3157e-02, -1.8688e-01,
5.1777e-02, -1.4315e-01, -1.1366e-01,
-2.4471e-01, 5.5554e-02, 8.9284e-02,
-1.6870e-01, 7.6156e-02, 1.2472e-01,
-1.5633e-01, 4.3184e-03, 1.1078e-01,
4.0579e-39, -3.8271e-39, 1.1535e-39,
6.6968e-40, -1.1545e-39, -5.4217e-40,
3.5566e-39, -4.4956e-40, -1.7097e-39,
-4.1778e-39, -3.7655e-39, -3.7148e-39,
-3.8013e-39, -3.5225e-39, -3.4678e-39,
-3.8369e-39, -3.5583e-39, -3.6518e-39,
-1.4894e-02, 2.4801e-03, -4.6996e-02,
6.7453e-04, 1.8799e-02, 2.9889e-02,
7.2700e-03, 1.2385e-01, 9.2522e-02,
3.9300e-39, 3.1853e-39, 2.8376e-39,
2.8888e-39, -4.8734e-39, 2.3402e-39,
-3.9710e-39, -4.3243e-39, 4.1151e-39,
1.6399e-02, -8.2828e-02, -5.8361e-02,
2.1315e-02, 1.1968e-02, 6.8727e-02,
3.8558e-02, 1.5451e-02, 5.4465e-04,
1.0549e-02, -8.6468e-02, -1.8535e-01,
-1.3616e-01, 2.7371e-01, 1.1157e-01,
-1.7097e-01, 1.3659e-01, 2.2831e-02,
-3.3897e-02, 1.3307e-01, 7.4482e-03,
4.8120e-01, 7.7053e-01, 5.3354e-01,
-2.4277e-01, -5.9136e-02, -1.3419e-01,
-7.4653e-02, -6.4169e-02, -2.9526e-02,
-3.6336e-02, 7.2362e-02, -3.5332e-02,
6.2628e-02, 6.2278e-02, 3.5639e-02,
3.6614e-39, -2.6150e-39, -3.5229e-39,
5.3538e-39, -1.2368e-39, 2.1530e-39,
4.8585e-39, -2.4150e-39, 5.2220e-40,
3.8610e-40, 1.4772e-39, 2.1962e-39,
-1.8493e-40, 1.1409e-39, 1.7309e-39,
-2.5751e-40, 9.1351e-40, 1.3106e-39,
6.2867e-02, -1.2727e-01, -6.5307e-02,
1.1415e-01, -4.5529e-02, -1.1358e-01,
4.3427e-02, -6.0994e-02, -7.7808e-02,
-4.1831e-39, 1.3230e-39, 5.5853e-39,
-3.4646e-39, -7.2824e-40, -3.4263e-39,
1.5344e-39, -5.8245e-39, 1.9910e-39,
1.1000e-02, -3.7088e-03, -8.0042e-02,
9.7603e-02, 8.6581e-02, -1.8921e-03,
2.2820e-01, 6.8073e-02, -8.1081e-02,
-3.3901e-01, -1.1231e-01, -8.6476e-02,
1.1147e-01, 4.9587e-01, -1.7039e-01,
-2.0702e-01, 5.8730e-02, -1.3475e-01,
2.3548e-01, -6.8044e-02, 9.4296e-02,
4.4803e-01, 6.1517e-03, -5.5192e-02,
-2.7304e-01, -2.6003e-02, 4.0713e-01,
2.8621e-02, 6.2698e-03, -1.4746e-01,
9.4819e-02, -1.3109e-02, 3.5540e-02,
4.4047e-02, 3.5066e-02, -9.5886e-03
}
,
{
-6.7011e-03, 1.7398e-01, 1.4767e-01,
-1.9882e-02, 1.9286e-01, 4.8626e-02,
1.1465e-01, -4.4017e-02, -1.9288e-01,
-7.5817e-02, 1.5598e-01, 1.2329e-01,
3.4126e-03, -9.4884e-02, -4.2276e-02,
3.9110e-02, -1.3477e-01, -4.4951e-02,
6.0450e-02, 4.4656e-01, 3.8954e-01,
-2.1207e-01, -1.0600e-02, -5.6351e-01,
1.8074e-01, 3.0797e-02, -4.0380e-01,
-1.0733e-01, 3.7228e-02, 9.7157e-02,
-7.5810e-03, 5.5605e-02, -9.1898e-02,
-1.4992e-01, -5.3206e-02, -1.9667e-01,
-1.6667e-01, 7.6091e-02, 1.7064e-01,
2.5322e-01, -9.4636e-03, -2.7899e-01,
4.2013e-02, 1.5693e-01, 3.1124e-01,
-2.1534e-02, 1.3915e-01, -2.8199e-01,
-2.9683e-03, 1.4445e-02, -1.5552e-01,
3.4759e-02, -2.0321e-01, -1.1155e-01,
3.6164e-02, 2.8664e-01, 2.3426e-01,
-1.2525e-01, -1.7195e-01, -5.2270e-02,
3.8782e-02, 5.7734e-02, 2.1945e-01,
1.0243e-01, -1.3159e-01, -1.7844e-01,
-6.0359e-02, 1.9125e-01, 3.3553e-01,
-1.0876e-01, -1.2149e-01, -5.7185e-01,
-2.0583e-02, -4.8168e-03, -7.1908e-02,
-2.3428e-02, 2.9902e-02, 1.0888e-02,
3.6383e-02, 1.0052e-01, 2.8972e-02,
1.1415e-03, -3.4518e-02, -9.0058e-02,
7.3207e-03, 6.0961e-02, 7.5629e-02,
-4.5969e-02, 2.4314e-02, 6.7658e-02,
-1.3043e-01, -3.0343e-01, -2.0799e-01,
-4.6261e-02, -1.7650e-02, -7.2160e-02,
-2.6291e-02, 1.5707e-01, 9.5021e-02,
-4.1030e-02, -8.1977e-02, -3.0776e-02,
-3.0685e-02, 8.2163e-03, 4.0357e-02,
-6.9633e-02, 6.0690e-02, 1.5418e-02,
-1.2814e-01, 7.3968e-02, -3.3742e-03,
-1.5239e-01, 8.9941e-03, 1.7877e-01,
2.1219e-01, -5.2057e-01, -2.2284e-01,
-3.4681e-02, -1.3594e-02, 1.6700e-01,
-7.7366e-02, 8.5138e-03, -4.3159e-02,
4.0597e-02, 9.7247e-04, -3.4326e-01,
-2.1424e-01, -1.6489e-01, -4.3248e-02,
1.5987e-01, 4.6235e-01, 2.6287e-01,
-1.2270e-02, 1.3165e-01, 5.3217e-02,
7.2716e-02, -7.0677e-02, -1.7740e-01,
-6.2357e-02, 1.1932e-01, 1.5733e-01,
-1.0275e-01, 1.4966e-01, 4.8125e-02,
-4.7150e-02, 1.5516e-01, 6.9615e-02,
6.1252e-02, 5.3859e-02, 1.7052e-01,
3.1940e-02, 1.1842e-01, 4.2265e-02,
-4.9531e-02, 1.1519e-01, 9.8914e-02,
1.3455e-01, 1.3177e-01, -2.7938e-03,
1.1895e-01, 1.1377e-01, 6.1035e-02,
8.0390e-02, -4.1028e-02, 3.7415e-03,
-1.0317e-01, 1.0279e-01, -6.5789e-03,
-2.3339e-02, 7.2741e-02, 4.1662e-02,
-7.4087e-02, 8.8531e-02, -4.9697e-02,
4.6134e-02, 1.4300e-01, 1.1720e-01,
3.8271e-03, 1.7108e-01, -2.4779e-02,
6.9844e-02, -4.6467e-02, -9.1699e-02,
5.5704e-02, -3.0312e-02, -7.8252e-03,
-4.3799e-02, -1.6623e-01, -2.3006e-02,
4.9214e-02, 3.1528e-02, 3.3302e-02,
3.1213e-02, 9.8880e-02, -1.1098e-01,
4.5092e-02, -1.6922e-03, -5.1380e-02,
7.6063e-02, 1.4159e-01, 4.1409e-02,
8.0812e-02, 9.7569e-02, 4.1532e-02,
-1.1136e-01, -4.3686e-02, -1.4144e-01,
-9.7717e-02, 4.8239e-02, 5.3374e-02,
-1.1827e-01, 1.0008e-01, 8.6368e-02,
-6.2572e-02, 3.6484e-02, -6.3361e-02,
4.1008e-03, 1.6709e-02, 4.0553e-02,
2.2766e-02, 2.7241e-02, 5.1786e-02,
1.3607e-02, 5.4638e-02, 6.9439e-02,
-2.4211e-02, 4.0065e-03, -1.9540e-03,
-9.5697e-03, 3.0503e-02, 3.5809e-02,
-4.3456e-02, 2.8959e-02, 4.2898e-02,
-1.5629e-02, -9.4347e-02, 7.2799e-02,
2.3115e-01, 7.3449e-02, 6.9354e-02,
1.6014e-01, 1.8878e-01, -2.2148e-02,
-4.9274e-02, -6.9233e-03, 1.0578e-02,
-4.3291e-02, -7.8361e-03, 1.6647e-02,
-5.6168e-02, 1.0317e-02, 3.1170e-02,
1.2530e-01, -3.2398e-02, -6.5690e-02,
-2.5805e-01, 3.6079e-02, 3.5390e-02,
-1.7236e-01, 6.6798e-03, 4.8924e-02,
1.3314e-01, 5.0646e-02, -3.4844e-02,
-1.2559e-01, -1.1774e-01, 1.2898e-01,
-7.7402e-02, -1.0703e-02, -2.6359e-01,
-3.8706e-02, -2.2082e-02, 2.7591e-03,
-8.2353e-02, -3.1941e-02, -1.1937e-01,
2.9747e-02, 2.0041e-01, -5.1984e-02,
1.7919e-01, 6.3603e-02, -5.5516e-02,
1.0116e-01, 8.7370e-02, -8.6624e-02,
-8.4314e-02, 3.5997e-02, 2.1161e-01,
1.0902e-39, 9.3514e-40, 9.3074e-40,
9.8377e-40, 1.1299e-39, 8.2024e-40,
1.2062e-39, 1.0405e-39, 1.0284e-39,
-5.7829e-40, -6.7489e-40, -6.3814e-40,
-6.8460e-40, -7.9377e-40, -7.6449e-40,
-4.7632e-40, -5.6022e-40, -5.2053e-40,
1.8459e-39, 2.1036e-39, 2.1848e-39,
2.0535e-39, 2.3728e-39, 2.4416e-39,
1.7027e-39, 2.0249e-39, 2.0833e-39,
9.1594e-40, 8.0493e-40, 7.7836e-40,
7.5889e-40, 6.3026e-40, 9.3384e-40,
9.6987e-40, 1.1273e-39, 8.1906e-40,
-7.9046e-39, -7.2328e-39, -7.1040e-39,
-7.9046e-39, -7.1862e-39, -7.4931e-39,
-6.5243e-39, -7.1117e-39, -6.9941e-39,
1.3577e-39, 3.5945e-40, -3.6833e-40,
1.3768e-39, 6.9779e-40, -7.5180e-40,
5.7295e-40, -6.0767e-41, -1.3085e-39,
7.7960e-39, 7.8579e-39, 7.4482e-39,
7.4224e-39, 7.5791e-39, 7.4378e-39,
6.5819e-39, 6.7271e-39, 6.6281e-39,
-1.6535e-39, -7.7817e-40, -8.5918e-40,
-2.0861e-39, -1.3658e-39, -1.0560e-39,
-3.4360e-39, -2.6878e-39, -2.6477e-39,
4.6460e-02, 1.1676e-01, -5.9846e-02,
8.6467e-03, -1.1287e-02, 7.0129e-02,
-1.1277e-01, 1.0321e-02, -1.9567e-02,
1.2145e-01, -7.1995e-02, -1.3615e-02,
9.7877e-02, 6.6061e-02, 1.0272e-02,
1.1391e-01, 5.6974e-02, 9.7472e-02,
-3.3605e-02, 6.1751e-02, -4.3004e-02,
-5.1040e-02, -3.8798e-02, -7.1736e-02,
-1.0179e-02, 8.5964e-02, -8.1435e-04,
2.5149e-02, 7.1990e-02, 8.1534e-02,
6.3133e-02, 5.8643e-02, 4.6756e-02,
-5.3580e-03, 3.4411e-02, 5.2957e-03,
1.0652e-01, -6.6035e-02, 8.5754e-02,
3.2919e-01, -1.5958e-02, 2.1694e-03,
-9.0943e-02, -2.1920e-02, 2.9706e-02,
4.7986e-02, 1.7105e-02, -5.7711e-02,
-4.2066e-03, 6.5668e-02, -1.6617e-01,
1.0057e-02, -2.0108e-03, -1.5499e-01,
6.7941e-02, 1.7352e-01, 4.9498e-02,
6.2013e-02, 9.6180e-02, -2.9861e-03,
-1.2482e-02, 9.5709e-03, -8.7913e-02,
-8.6954e-02, 9.9646e-03, 8.0050e-02,
-4.4157e-02, -6.3008e-03, 4.0645e-02,
-7.9624e-02, 1.0856e-01, -4.5341e-04,
7.1085e-02, 5.7002e-02, 1.1673e-02,
-5.1378e-02, -2.3945e-03, -5.9532e-02,
3.4998e-02, -3.6019e-02, 1.0428e-02,
5.9774e-03, 5.4993e-03, 2.4306e-02,
-5.9813e-03, 4.4999e-02, 7.4744e-02,
-3.0773e-02, -3.6835e-02, 5.8396e-04,
-3.8644e-01, 2.4563e-01, 1.2436e-01,
-3.2986e-01, -1.1044e-01, 2.0753e-01,
-1.3621e-01, -1.3544e-01, 5.8882e-02,
8.8837e-02, 5.7460e-02, -3.0960e-02,
-1.2598e-03, 3.9124e-02, -5.3322e-02,
-4.4227e-02, -3.8000e-02, -3.2677e-02,
1.5675e-01, 1.0808e-01, 1.1024e-01,
5.4468e-01, -5.9268e-01, 1.0088e-01,
8.2360e-02, 1.9646e-01, 6.4799e-03,
1.6357e-01, 6.8273e-02, -1.2051e-01,
4.9511e-02, 4.7334e-01, -4.8876e-02,
-1.3130e-01, -5.1568e-03, 1.0088e-01,
-5.8971e-02, 2.5775e-01, 9.0169e-02,
-3.0461e-01, -3.2353e-02, -2.0293e-01,
1.3897e-02, 1.4249e-01, -5.8661e-02,
-1.3624e-01, -5.3026e-02, 3.1038e-03,
-5.6211e-01, -2.8375e-01, -1.2524e-01,
-2.3813e-01, -2.2439e-02, -4.4082e-02,
9.9066e-02, -7.1735e-02, 2.2345e-02,
-1.4791e-02, 1.3225e-01, 8.9460e-02,
-4.8986e-02, -3.2296e-02, -4.7474e-02,
6.5865e-02, -8.0697e-02, -6.8475e-02,
-7.6845e-02, 1.1568e-01, 3.7443e-03,
1.0448e-01, -3.3206e-03, 5.4523e-02,
5.5741e-02, 5.0917e-02, 1.0209e-01,
-9.6729e-02, 7.8876e-02, -4.9550e-02,
-3.8926e-02, 7.1163e-02, 8.9436e-02,
-1.4001e-03, -9.4980e-02, -7.7747e-02,
9.4335e-02, 1.1605e-01, 9.5715e-02,
1.7951e-02, 4.3177e-03, -5.6937e-02,
4.4558e-02, -5.2562e-02, 4.0652e-02,
1.8058e-01, -1.0763e-01, 4.8927e-02,
-5.2569e-03, -1.3437e-01, 2.8578e-02,
1.3592e-02, -3.9346e-02, 1.0003e-01,
1.8091e-01, 7.2687e-03, -3.7241e-02,
6.0438e-02, 5.7872e-02, 7.3778e-02,
1.2411e-02, 4.1856e-02, -2.8892e-02,
3.2884e-02, 6.9072e-02, -5.9363e-02,
-1.7112e-01, -9.9734e-02, -7.3417e-02,
-8.9623e-02, 4.5292e-02, -1.6635e-01,
-3.1895e-02, 1.4284e-01, 2.0752e-01,
2.3383e-02, -1.3490e-02, 5.1593e-03
}
,
{
5.8708e-01, 2.6026e-01, 8.8379e-02,
3.1818e-01, 7.0055e-03, 1.1652e-01,
1.1719e-01, 8.7711e-02, -1.1687e-02,
7.5741e-02, -3.7970e-01, 1.6001e-01,
1.0739e-01, 3.1735e-01, 2.0061e-01,
8.6719e-02, 8.5111e-02, -3.9354e-02,
-9.9512e-02, -9.1524e-02, -9.7984e-02,
5.6333e-02, -1.5928e-01, 1.1998e-03,
2.7488e-02, 2.8168e-02, 1.3768e-01,
5.9686e-02, 2.8931e-01, -1.7131e-02,
1.6391e-01, 3.3748e-01, 1.2296e-01,
8.9242e-02, 1.4761e-01, 1.7187e-01,
-2.6352e-39, -4.0703e-39, -5.1751e-39,
-2.5214e-39, -3.9666e-39, -4.6282e-39,
-2.4635e-39, -3.6734e-39, -4.3359e-39,
-7.1654e-02, 7.9691e-03, -1.0219e-01,
-5.5684e-02, -1.3065e-01, -1.9106e-02,
1.0561e-01, 5.9054e-02, -2.1279e-02,
-1.8840e-02, 1.6690e-01, 3.8050e-01,
6.2779e-02, -1.2124e-01, 5.0304e-01,
2.1870e-02, 1.7631e-01, 1.4858e-01,
1.4614e-01, -1.1767e-01, -3.9155e-02,
1.2963e-01, -4.6753e-02, 1.3848e-01,
-8.2292e-02, 2.1908e-01, 6.2794e-02,
-3.2625e-01, -8.8528e-03, -6.5603e-03,
5.4245e-02, 2.7983e-01, 2.1608e-01,
8.5890e-02, 1.0955e-01, -1.1606e-01,
9.7435e-02, 1.5911e-01, 6.7285e-02,
3.9570e-02, 1.9333e-01, -1.5531e-02,
-2.3475e-01, -2.5006e-02, 2.8106e-02,
6.8740e-03, 1.3261e-01, -3.8563e-02,
8.8758e-02, -4.2225e-02, 4.7042e-02,
5.6284e-02, -2.8303e-02, 3.4532e-03,
-4.0265e-02, -3.0645e-02, -5.2059e-02,
-4.6196e-02, -2.4868e-02, -3.3257e-02,
-3.7208e-02, -2.4100e-03, -7.1959e-04,
6.4237e-39, 6.1438e-39, 6.5434e-39,
6.1596e-39, 6.1608e-39, 6.3157e-39,
6.4263e-39, 6.4625e-39, 6.5877e-39,
1.1092e-01, -4.4784e-02, 9.1292e-02,
9.2900e-02, 1.2459e-01, -7.1447e-02,
2.6158e-02, -5.0219e-02, -5.6136e-02,
-5.8603e-02, 2.9323e-02, -2.4230e-01,
-9.4921e-02, 1.9103e-01, 1.1670e-01,
1.2022e-02, 6.2830e-02, 3.0393e-01,
3.3819e-02, 1.0040e-01, 8.2600e-02,
-8.7604e-02, 7.0641e-02, -1.0132e-01,
-9.9371e-02, 8.9363e-02, -1.0703e-01,
4.4603e-01, 7.9636e-03, 1.8834e-01,
1.1859e-01, 4.0760e-01, 9.6841e-02,
-1.1735e-01, 2.3993e-01, -7.7916e-02,
6.3481e-02, -1.4958e-01, 1.1554e-02,
5.2668e-02, 3.4379e-01, 8.3536e-03,
-5.5403e-02, 1.1655e-01, -7.5022e-02,
-8.2992e-02, -7.0322e-02, -1.0078e-01,
-1.4516e-02, -1.6558e-02, 6.6806e-02,
-6.7454e-04, -5.7525e-02, 1.5772e-01,
1.6446e-01, -1.1897e-02, -8.3387e-02,
7.1339e-02, 1.6254e-01, 1.6963e-01,
1.2630e-02, 5.7933e-02, 8.4686e-02,
-5.6318e-39, -6.1837e-39, -6.1661e-39,
-5.9923e-39, -6.2371e-39, -6.4922e-39,
-6.4206e-39, -6.6092e-39, -7.1603e-39,
4.6507e-02, -4.5924e-02, -7.3838e-02,
-3.3012e-02, 5.1295e-02, -7.4884e-02,
7.5389e-02, 1.2002e-01, 3.9442e-03,
9.9461e-02, 1.9607e-01, 1.4896e-01,
-1.1191e-02, 1.8352e-01, 2.6778e-01,
8.0977e-02, 1.0885e-01, 2.5331e-01,
3.1503e-02, -3.0004e-01, -6.9114e-02,
2.0705e-01, -2.0978e-02, 1.5154e-01,
6.3033e-02, -1.5721e-01, 5.1067e-02,
-1.1220e-02, 1.5315e-01, 4.5277e-03,
3.3250e-01, 1.4207e-01, 1.3469e-01,
5.2996e-01, -2.5803e-01, -4.5525e-02,
3.9807e-02, -1.7088e-01, -1.2414e-01,
2.1564e-01, -2.9160e-01, -1.8796e-01,
1.5482e-02, 2.7005e-01, 8.2446e-02,
5.4906e-02, -1.0507e-01, -8.0069e-02,
-4.5729e-03, -2.0621e-02, 5.0088e-02,
2.5479e-02, 9.5924e-02, 8.3813e-02,
4.7833e-02, -2.6191e-01, 3.3483e-02,
6.1653e-02, 7.1940e-03, -1.3578e-01,
1.7662e-01, -2.8194e-02, -2.7509e-02,
-1.9419e-39, -2.4904e-39, -2.7567e-39,
-2.9896e-39, -3.2700e-39, -3.6336e-39,
-3.8942e-39, -4.2028e-39, -4.5229e-39,
-1.6839e-02, -9.4421e-02, -3.0147e-02,
-6.5974e-02, -1.6716e-02, 5.0672e-02,
-7.9841e-02, -4.7086e-03, 5.0016e-02,
1.8223e-04, 3.3984e-03, 5.1965e-02,
-7.3512e-02, -5.6604e-03, -1.1630e-01,
-1.0767e-01, 3.2261e-02, -2.0044e-01,
1.0995e-01, 4.3581e-02, -3.9397e-02,
-1.4476e-02, -2.3087e-02, 2.6423e-03,
1.2047e-02, 1.2084e-01, 1.8563e-01,
-2.8497e-01, -2.5353e-01, 1.0933e-01,
8.8974e-03, 1.3315e-01, 1.9153e-01,
2.0427e-02, -8.9900e-02, 2.2363e-02,
2.8575e-02, 1.6351e-01, 1.1876e-01,
-2.7438e-02, -1.0816e-03, -5.5680e-02,
5.1369e-02, -2.0575e-02, 4.5232e-02,
9.4988e-02, 2.5418e-02, 8.9888e-02,
9.6631e-02, 1.5828e-01, 1.1577e-01,
-2.9665e-02, 3.2035e-02, 1.4428e-01,
7.4352e-03, 2.4917e-03, 4.2713e-03,
1.2534e-02, 2.1314e-02, 1.5963e-02,
2.2920e-03, 2.1864e-02, 2.2921e-02,
7.1089e-40, 5.3581e-40, 4.5922e-40,
6.2492e-40, 4.6365e-40, 4.5466e-40,
9.2740e-40, 7.7219e-40, 7.4187e-40,
-7.0909e-02, 1.1127e-01, -8.8953e-02,
-5.0537e-04, 4.5664e-05, 1.3829e-02,
7.4380e-02, 1.3900e-03, 4.0345e-02,
5.7173e-02, 8.7514e-02, -3.9945e-01,
4.4116e-02, 1.4148e-01, -2.7578e-02,
-1.2133e-02, 1.9647e-01, -2.6767e-02,
8.5870e-02, -1.3723e-02, 1.3408e-02,
7.9471e-03, 7.8321e-02, 5.1118e-02,
-8.3660e-02, -7.1584e-02, 2.7423e-02,
-5.5651e-39, -3.2350e-39, 4.7534e-39,
-4.8581e-39, -5.8010e-39, 6.3268e-39,
-3.4016e-39, 6.2313e-39, 5.7413e-39,
-3.0708e-39, 6.0155e-39, -6.3317e-39,
-3.1054e-39, -5.5914e-39, -6.4181e-39,
-1.3636e-40, -6.0343e-39, -6.2034e-39,
1.0108e-39, -2.5283e-39, -8.6098e-40,
1.0088e-39, -2.3042e-39, -8.2029e-40,
1.2802e-39, -3.7761e-39, -4.6451e-40,
1.4160e-39, 7.3869e-40, 1.3275e-39,
1.2560e-39, 1.0078e-39, 1.2296e-39,
-2.4490e-39, 8.6071e-40, -2.4510e-39,
2.1753e-39, -2.0576e-39, -2.1365e-39,
2.0157e-39, 2.0755e-39, 1.9439e-39,
2.0998e-39, 2.0732e-39, 2.1072e-39,
-1.1289e-39, -1.6132e-39, 4.8117e-40,
1.2029e-39, -1.3112e-39, 6.4761e-40,
1.4958e-39, -9.2719e-40, 8.9526e-40,
3.6032e-39, -4.9803e-39, -2.4410e-39,
-1.6429e-39, -4.9602e-39, -5.9626e-39,
-1.6627e-39, -4.9809e-39, -5.6258e-39,
1.6619e-39, 1.7856e-39, 5.1822e-39,
1.5443e-39, 1.4215e-39, 6.1830e-39,
1.4242e-39, -1.7895e-39, 5.2206e-39,
-2.4764e-01, -2.8696e-01, -5.7562e-03,
1.9255e-01, 5.1335e-02, -1.4512e-01,
-1.1017e-02, -3.6505e-02, -1.1773e-01,
5.8651e-02, -1.9354e-02, 2.1595e-02,
-3.5114e-03, 1.8335e-01, 4.0043e-02,
1.0579e-01, -6.3055e-02, 2.6981e-02,
-1.4351e-02, -1.5029e-02, -9.7792e-02,
4.6718e-02, 3.8673e-02, -2.3410e-02,
-2.8942e-03, -8.4898e-03, -3.3613e-02,
2.0298e-01, 9.7218e-02, 1.5052e-01,
3.2108e-01, 2.6568e-01, 1.3809e-03,
1.0008e-01, 6.9262e-02, -4.7810e-02,
4.1291e-39, 4.3762e-39, 4.2724e-39,
4.5864e-39, 4.7827e-39, 4.8821e-39,
4.5529e-39, 4.6921e-39, 4.7519e-39,
9.1246e-03, -1.8136e-02, -5.8517e-03,
9.1080e-03, 4.2591e-02, -1.5604e-02,
-3.6270e-02, 5.9184e-02, 2.3189e-02,
4.2636e-02, 3.6600e-01, 4.7134e-01,
3.6666e-02, 4.3565e-01, 2.1105e-01,
-5.2747e-02, 4.0503e-01, 2.0926e-01,
8.8427e-02, 4.9138e-02, -2.3381e-01,
-5.6521e-02, 7.5013e-02, -1.4783e-01,
-4.7299e-02, -8.1200e-02, -6.5665e-02,
-1.6281e-01, -2.3070e-01, 5.4033e-02,
1.1527e-01, 3.4730e-01, 1.9293e-02,
-1.8352e-02, 2.0626e-01, -1.1955e-01,
8.1665e-02, 3.8584e-02, 2.7958e-03,
6.4294e-02, 1.3912e-01, -5.6370e-02,
-1.7618e-02, 9.0357e-02, -5.5021e-03,
9.3211e-05, 1.5219e-01, 1.0844e-01,
7.6218e-02, 1.7016e-01, 9.2438e-02,
4.3387e-02, 8.0141e-02, -3.2034e-02,
9.2121e-03, -2.8742e-03, -1.5988e-03,
9.1980e-03, 1.6983e-02, 3.3154e-03,
-2.5642e-02, 4.1607e-03, 6.9246e-03,
3.7665e-40, -4.0391e-41, -4.0502e-41,
2.2436e-40, -1.7190e-40, 1.6583e-40,
1.4090e-40, 2.2914e-41, 6.7388e-41,
-8.1776e-02, 9.0814e-02, 1.0222e-01,
-3.4949e-02, 1.0266e-01, 3.6826e-02,
-8.3856e-02, 1.1102e-01, 1.1026e-01,
1.5993e-02, -1.1626e-01, -3.0870e-01,
-3.4119e-03, 1.7638e-01, -1.9092e-01,
-1.2549e-01, 3.2538e-01, -7.9381e-02,
3.8433e-03, -8.2530e-02, 3.2103e-02,
-1.1637e-02, -1.0371e-01, 2.3851e-02,
2.5390e-02, 7.7085e-02, 8.9536e-02
}
,
{
-2.8918e-02, -8.3719e-02, -3.3026e-02,
-2.2620e-01, 2.4280e-02, -2.1254e-01,
2.8231e-02, 3.5323e-02, -2.8425e-02,
1.6891e-01, 3.8192e-03, 7.2794e-02,
-1.6364e-01, -4.1031e-02, -1.3141e-02,
-3.9478e-02, 1.4910e-01, -7.0978e-02,
-6.3880e-02, 9.8206e-02, 1.3163e-01,
1.5778e-01, 1.1914e-01, 3.3277e-01,
-3.6808e-01, -5.5627e-01, 1.4401e-01,
-4.0314e-01, 3.6298e-01, -3.8212e-02,
-2.3782e-01, 2.5410e-01, -2.2334e-01,
7.6542e-02, 9.4998e-02, 3.3399e-02,
-1.8601e-01, -1.8863e-02, -4.1835e-02,
-5.8671e-02, -8.9987e-02, -6.1069e-02,
-7.1062e-02, -9.5987e-02, 1.2318e-02,
5.4541e-39, -1.8871e-39, 4.5048e-39,
-2.2237e-39, -5.4753e-39, 1.4395e-39,
-3.5753e-39, 6.1466e-40, -2.1567e-39,
4.5273e-02, 1.1619e-02, 1.1379e-01,
1.4093e-01, 1.0444e-01, 1.1283e-01,
-3.0230e-02, 3.1937e-01, 5.0541e-02,
8.2862e-02, -3.1540e-02, -6.4833e-02,
1.5168e-01, 1.7613e-03, 4.2690e-02,
1.8820e-01, 4.3783e-02, 6.3473e-02,
8.0477e-02, 1.0397e-01, -3.6337e-02,
-7.2828e-02, 6.4048e-02, 4.2476e-02,
-1.3974e-04, -2.2468e-01, -4.9189e-02,
-2.7478e-03, 8.7663e-03, 4.3870e-02,
-3.3168e-02, 1.1915e-01, -1.8083e-02,
4.8155e-02, -4.1742e-02, 1.1251e-01,
-6.1535e-02, 5.1782e-02, -2.3494e-02,
5.1677e-02, 1.4067e-01, -1.0377e-01,
3.2951e-03, 1.1942e-02, -1.1775e-01,
-2.2104e-02, -8.1073e-02, -3.7509e-02,
6.8970e-03, 1.6406e-02, 4.6923e-02,
-8.8448e-03, 2.9130e-02, 3.1024e-02,
7.6795e-02, 4.6816e-02, -1.3204e-02,
1.3988e-01, 1.1175e-01, 8.7121e-02,
1.2097e-01, -3.8463e-02, 6.7387e-02,
1.4708e-39, 1.7125e-39, 2.7764e-39,
1.5203e-39, 1.5811e-39, 4.4921e-39,
1.8828e-39, 1.7593e-39, 2.3774e-39,
4.3474e-02, -4.7065e-02, -7.1999e-02,
6.0338e-02, 3.7240e-02, 2.8802e-02,
-4.0701e-02, 1.8627e-02, -1.8181e-02,
5.5169e-02, 1.1874e-01, -7.0475e-02,
-1.3438e-02, 1.4335e-01, 1.5180e-01,
5.6331e-02, 7.9719e-02, 6.2691e-03,
-6.6460e-02, 2.7455e-01, 5.5916e-02,
1.3515e-01, -3.7263e-01, 1.3463e-01,
-4.0820e-05, 3.1896e-01, -8.3871e-02,
-7.6172e-02, 6.1963e-02, -1.3804e-02,
-5.2852e-02, 1.0006e-01, -3.4106e-02,
6.7218e-02, -3.8616e-03, -7.1788e-02,
1.6386e-02, -1.8612e-02, -1.7354e-01,
-1.2166e-01, 1.2667e-02, -3.3852e-02,
-3.2897e-02, 1.0343e-01, 2.4924e-01,
-1.3272e-02, 1.5705e-01, 6.7731e-02,
1.0637e-01, 1.9482e-02, -2.0655e-01,
-5.9087e-03, -7.1073e-02, 1.8723e-02,
-2.6087e-02, 1.5997e-01, 9.6264e-02,
1.2431e-01, 1.1462e-01, -9.7197e-02,
-6.2347e-02, -4.5239e-02, -2.6443e-02,
3.7406e-39, -4.6345e-40, 3.7971e-39,
-3.8112e-39, -3.5585e-39, 4.6938e-39,
6.0588e-39, -4.2403e-39, 1.5311e-39,
1.6381e-01, -6.8390e-02, 2.6527e-02,
-9.8612e-02, 2.1953e-01, -2.1886e-01,
7.4841e-02, -1.2118e-01, -8.1700e-02,
4.4974e-02, 7.7514e-02, -8.4620e-02,
-2.9808e-02, 2.1591e-02, -3.9502e-02,
-5.5797e-02, -6.5105e-02, -5.9860e-02,
-3.7811e-01, -2.3056e-01, -7.4491e-02,
4.0833e-02, -2.2613e-01, -1.4986e-01,
-1.0974e-01, -6.5161e-01, 1.7546e-01,
7.7903e-02, -1.5969e-02, -6.3040e-02,
-1.7819e-01, -7.1414e-02, 1.8451e-02,
-1.0618e-01, 3.5614e-03, 3.6719e-02,
1.5666e-01, 3.9222e-01, 9.1678e-02,
1.4519e-01, 5.7331e-01, -7.3466e-02,
1.0271e-01, 1.0803e-01, -1.3150e-01,
3.7496e-01, 1.5001e-01, 1.4727e-01,
3.2151e-01, 1.2875e-01, -8.1645e-02,
2.8629e-01, 1.9329e-01, -8.0009e-02,
-9.9557e-02, -2.6954e-02, 2.6042e-02,
-5.3374e-02, 1.1369e-01, 4.6503e-02,
-3.4068e-02, 9.1849e-03, -9.1420e-02,
4.6343e-39, 4.8289e-40, 3.1694e-40,
-3.5093e-39, -4.7356e-39, 7.1265e-40,
-4.9626e-39, -2.1280e-39, 1.8542e-39,
-1.3634e-01, -5.4825e-02, -6.6125e-02,
-2.0694e-01, 1.4924e-01, 1.4028e-01,
3.2735e-02, 7.6360e-02, -9.2541e-02,
-1.2149e-01, -7.9789e-02, -2.9591e-02,
1.2852e-02, 1.2457e-01, 1.3081e-02,
-3.2966e-03, 1.1089e-01, 8.6461e-02,
1.4352e-01, 5.9238e-02, -2.1140e-02,
7.3999e-02, 2.0893e-01, 3.5512e-02,
-5.3110e-02, 3.9222e-01, 1.3103e-01,
1.0168e-01, 1.6685e-02, 5.1616e-02,
9.8241e-02, -1.6502e-01, -1.2586e-01,
8.3915e-02, 7.4837e-03, 5.7355e-02,
-3.4982e-02, -1.2773e-01, 6.8213e-02,
-1.4674e-01, -3.6844e-01, 8.1546e-02,
-1.5385e-01, -7.0368e-02, 4.3894e-02,
7.8201e-02, -1.3952e-01, 1.5154e-01,
2.3880e-02, 1.4078e-01, -1.2906e-01,
-1.8268e-01, -1.5687e-02, -1.2588e-01,
-9.4643e-03, 1.4718e-02, 7.4932e-02,
3.0996e-02, -1.2339e-01, 1.7452e-01,
4.4221e-02, -1.3808e-01, -1.0205e-02,
-8.6959e-40, -3.7907e-39, -1.6020e-41,
4.3567e-40, 1.4647e-39, 6.5692e-40,
5.4286e-39, 8.8667e-40, -3.5047e-39,
2.4116e-02, -9.5358e-02, 1.6468e-01,
3.1916e-01, -2.3472e-01, -2.1644e-01,
1.2945e-01, -1.8403e-02, -3.2247e-02,
1.3666e-02, -3.0548e-02, -4.7635e-02,
-9.2714e-02, -2.1605e-01, -5.9464e-02,
-8.9110e-03, -3.9299e-03, -2.3289e-02,
-1.7855e-01, 9.0661e-03, -1.9142e-02,
-5.6754e-02, -5.4451e-01, -5.7664e-01,
1.6835e-01, 2.0531e-02, 2.0812e-01,
5.2794e-02, -9.0414e-02, 3.5560e-02,
3.7395e-02, 5.9355e-02, -3.6676e-02,
3.8035e-02, 6.7844e-02, 1.1042e-01,
5.0372e-02, 6.8188e-02, -8.5353e-02,
2.2769e-01, 5.9758e-01, -7.4568e-02,
7.8316e-02, 8.4925e-02, -4.0400e-02,
-7.7984e-02, -2.0739e-01, 1.1736e-01,
2.4528e-02, 2.1850e-01, 2.5639e-01,
-2.4561e-02, 8.4661e-02, -9.2191e-02,
-2.7006e-02, -7.8921e-02, -2.7124e-02,
-5.9232e-03, -2.7693e-02, 5.9524e-02,
9.7704e-02, 9.6223e-02, 2.0432e-02,
-2.5588e-39, 5.5478e-39, -5.6209e-39,
-4.7285e-39, 4.5875e-39, -5.7483e-39,
6.7240e-40, -3.5113e-39, -3.6246e-39,
1.6870e-03, -2.1707e-01, -3.8895e-02,
-5.8465e-02, -5.9146e-02, 1.1936e-01,
-2.7727e-02, -9.5047e-02, -2.2627e-01,
-9.5155e-02, -7.1422e-02, 9.4611e-03,
3.7587e-03, 1.6966e-02, 2.8839e-02,
-3.0794e-02, 1.9888e-02, -5.2541e-02,
-1.0708e-02, 3.0171e-02, -3.0473e-01,
-1.0214e-01, 4.2017e-02, 2.5568e-01,
-9.8664e-02, -5.5928e-01, -7.6876e-02,
-8.6821e-03, 4.6484e-02, -3.0836e-01,
-1.0205e-01, 6.8113e-02, -2.8059e-01,
-5.7828e-02, 2.0990e-02, -1.2843e-01,
7.5680e-02, 1.7504e-02, 1.6278e-01,
1.4075e-01, 2.4361e-01, 2.2737e-01,
-1.3044e-01, 8.2145e-03, 1.6344e-01,
-2.4780e-03, 1.5108e-01, 1.3313e-02,
-9.5257e-02, 6.1810e-02, -1.9386e-01,
7.1365e-02, 1.5328e-01, 9.5848e-04,
1.2278e-01, 7.8318e-02, 3.3400e-02,
4.8597e-02, 6.0632e-02, -5.7238e-02,
3.2522e-02, 4.5926e-02, -9.5566e-02,
1.0844e-39, -3.2490e-39, -2.6904e-39,
-3.0517e-39, 4.7535e-39, 4.3440e-39,
-1.3996e-39, 4.5201e-39, -3.6165e-39,
-5.6164e-02, 1.0353e-01, 6.6228e-02,
8.2147e-02, 4.7827e-01, 1.2004e-01,
-6.8150e-02, 1.8340e-01, 2.2113e-01,
1.0580e-05, -2.0949e-01, -1.0358e-01,
1.6206e-01, 1.2538e-01, -1.3104e-01,
1.3700e-01, 2.9282e-02, -8.7020e-02,
4.5467e-39, 5.9787e-39, 2.6105e-39,
-1.2670e-39, 2.9513e-39, -1.0811e-39,
-3.9129e-39, -1.8499e-39, 2.9297e-39,
5.7414e-39, 5.5907e-39, 5.5702e-39,
5.9004e-39, 5.7585e-39, 6.3188e-39,
5.7395e-39, 5.6146e-39, 5.6451e-39,
-7.3964e-39, -6.3330e-39, -5.5236e-39,
-7.5172e-39, -5.8828e-39, -3.7555e-39,
-6.9528e-39, -7.7656e-39, -5.5115e-39,
-7.9031e-39, -7.8200e-39, -7.7914e-39,
-7.4570e-39, -7.6413e-39, -7.9054e-39,
-7.3437e-39, -6.7956e-39, -7.0789e-39,
-3.6774e-40, 1.3572e-40, 3.0250e-40,
-4.1792e-40, -4.6240e-40, 2.2528e-40,
-5.2143e-40, -5.6847e-40, -4.2768e-40,
-4.0128e-39, 1.3485e-39, 1.3436e-39,
1.5337e-39, -3.9186e-39, 1.2120e-39,
1.2992e-39, 1.5671e-39, 1.5659e-39,
-4.6533e-39, -4.7029e-39, -6.0334e-39,
-5.1157e-39, -5.3257e-39, -5.8595e-39,
-4.3046e-39, -4.4391e-39, -5.0039e-39,
-1.0025e-39, -1.0145e-39, -8.6762e-40,
-1.0282e-39, -1.0939e-39, -9.4134e-40,
-1.1868e-39, -1.2133e-39, -5.4261e-40
}
,
{
-1.2633e-01, 2.7332e-01, -4.6674e-01,
-9.4537e-03, 9.6797e-02, -6.4975e-01,
1.8103e-02, 2.7190e-03, 2.3888e-01,
4.8553e-02, -8.7297e-02, 1.8415e-01,
3.1194e-02, -7.2899e-02, -8.1835e-02,
7.1639e-02, -3.1455e-02, -6.2866e-02,
-2.1413e-02, 4.6066e-02, 9.2372e-02,
1.5761e-01, -1.0352e-01, -3.4808e-01,
2.3715e-02, 1.6453e-01, -1.3699e-01,
1.1705e-01, -1.6882e-02, 1.2575e-01,
-2.9834e-02, -1.1558e-01, 4.7318e-01,
3.5301e-02, 1.1246e-01, 3.5038e-03,
1.5837e-01, -2.9968e-01, 1.6094e-01,
4.0562e-02, -1.6329e-01, -3.7023e-02,
-3.9991e-02, 1.7001e-01, -2.7735e-03,
8.8139e-02, -2.4828e-01, 5.5751e-04,
-1.3871e-01, -2.4839e-01, 1.7996e-03,
-1.1670e-01, 3.3651e-02, -2.9559e-02,
3.8572e-03, 3.7329e-02, 4.7511e-02,
-7.8848e-02, 1.2844e-01, 9.2677e-02,
-8.5041e-02, 5.7212e-02, -1.0415e-02,
-3.2462e-39, 2.3003e-39, 4.9676e-39,
-3.9261e-39, -6.8290e-40, 5.9119e-39,
-4.1242e-39, -1.1996e-39, 3.8436e-39,
-2.3243e-02, -2.2525e-02, 3.9668e-02,
-1.1210e-01, -2.3892e-01, 1.6431e-01,
-1.3998e-01, -1.5857e-01, -1.5625e-01,
-1.7634e-02, -3.9174e-02, -9.0936e-03,
-3.9428e-03, -1.6411e-02, 2.6484e-03,
1.1376e-02, -2.9057e-03, 6.3382e-02,
4.8930e-02, 9.1298e-02, 1.8195e-02,
-6.3365e-02, -1.5407e-01, 8.1543e-02,
4.9919e-02, 1.6852e-01, 4.4053e-02,
-4.8682e-02, -7.3614e-02, -6.9206e-03,
-4.8193e-02, -2.3704e-01, -8.3394e-03,
5.6024e-02, 3.7845e-01, -2.4550e-02,
5.2050e-02, 2.2027e-01, -4.1328e-02,
-6.6327e-02, 1.0450e-01, 1.7058e-02,
-1.2047e-01, 5.2494e-02, -1.8018e-02,
5.4807e-02, 1.1177e-01, 2.3511e-02,
6.0413e-03, -3.2457e-02, 7.6611e-02,
-2.1276e-02, 3.0054e-02, 5.0752e-02,
7.5556e-02, 2.5734e-02, -6.0634e-02,
1.2201e-01, -4.1533e-01, 2.7634e-02,
4.5560e-01, 3.2832e-01, 2.6277e-02,
1.9889e-39, 3.8337e-39, 4.0170e-39,
1.5149e-39, 3.6456e-39, 4.0474e-39,
1.1508e-39, 2.7381e-39, 3.8673e-39,
-7.9206e-02, -2.0763e-02, -2.4842e-01,
-6.5777e-02, -1.8446e-01, 2.6178e-01,
-1.7908e-02, -2.3039e-01, -3.5767e-01,
1.0324e-02, 1.3610e-01, 8.6519e-02,
1.3499e-01, 3.1933e-02, 9.1822e-03,
-3.6017e-02, -2.2056e-01, -2.3258e-01,
-7.6185e-02, -2.8981e-01, -1.1816e-01,
-9.9048e-02, 5.3879e-02, -1.7351e-01,
-2.1874e-01, -1.2109e-01, -3.1457e-01,
5.1576e-02, -2.5656e-02, 4.6789e-02,
7.6286e-02, 6.0126e-01, -2.5925e-01,
-5.3443e-02, -3.3656e-01, 4.7585e-01,
-4.7442e-02, -5.1580e-02, -8.5216e-02,
-1.0600e-01, -1.3859e-01, -3.1484e-01,
2.1454e-01, -1.1851e-01, -7.6614e-02,
-7.8873e-03, -7.0275e-02, -1.0958e-01,
-8.0654e-02, 1.3946e-01, 2.5292e-01,
1.3254e-03, -6.7372e-02, -2.6429e-01,
-8.2344e-02, 1.2388e-01, 5.2930e-02,
8.3665e-02, 3.9729e-01, 4.7687e-02,
-4.4502e-02, -8.3105e-02, -1.6430e-01,
1.2825e-39, 1.7532e-39, 2.1774e-39,
-2.1331e-39, -2.1826e-39, -1.0009e-39,
3.7081e-39, 2.0015e-39, -5.8349e-40,
-3.5278e-02, 6.5211e-02, -5.4199e-03,
8.3961e-02, 3.1410e-02, 4.4510e-02,
-5.4905e-02, 4.0727e-02, -1.5710e-02,
1.0813e-01, 8.2043e-03, 4.1303e-02,
1.3405e-01, 1.4150e-01, 7.2155e-02,
3.3942e-02, -4.7781e-02, 1.6095e-01,
-1.4266e-01, -2.5283e-02, 6.4043e-03,
-1.8699e-02, 1.0895e-01, -2.1497e-02,
5.5074e-02, 1.7031e-02, 1.0572e-01,
7.3199e-04, 1.0813e-01, -9.0280e-05,
1.4808e-01, 2.5436e-01, -1.3749e-01,
2.2936e-02, -7.9733e-02, -2.2360e-01,
6.0406e-02, -1.2874e-01, -7.4692e-02,
-1.3216e-01, -9.9889e-03, 2.7608e-03,
-1.1412e-01, -5.1312e-02, -1.7196e-02,
-2.2800e-02, -1.2112e-01, -9.3855e-03,
3.6905e-02, 1.0049e-01, 9.0602e-03,
-7.3200e-02, 1.0628e-01, -4.8218e-02,
-4.6525e-02, 6.0314e-02, -3.6467e-03,
-8.0943e-02, 2.5461e-01, 1.5461e-01,
-5.7708e-02, -5.7823e-02, 5.4042e-02,
3.8847e-39, 3.5806e-39, 4.1610e-39,
3.9082e-39, 4.1898e-39, 4.1926e-39,
4.1200e-39, 4.3759e-39, 4.3977e-39,
-3.3576e-01, 9.5443e-02, 2.7804e-02,
-2.3834e-01, -7.2650e-01, -1.2229e-01,
1.0380e-01, 1.9520e-01, 3.4571e-02,
-3.7291e-02, 7.6216e-02, 8.6171e-02,
-1.6324e-01, -8.6759e-03, 4.3038e-02,
-3.4364e-02, -7.2777e-03, 3.7451e-02,
1.8826e-01, 1.6387e-01, -3.4750e-02,
-2.0203e-01, 2.4170e-01, 9.0358e-05,
-1.3049e-01, 9.6855e-02, -1.6737e-03,
-6.3782e-02, 7.1413e-02, -6.5077e-02,
-1.5262e-01, 4.3261e-01, -8.4224e-02,
6.4632e-02, 1.0553e-01, -1.5274e-01,
4.4294e-05, 8.6239e-02, 5.7537e-03,
-5.7633e-01, -5.0076e-03, -5.2298e-02,
1.8556e-01, -1.1332e-02, -2.7010e-02,
1.6155e-01, -3.0337e-02, -9.6808e-03,
-2.8404e-01, -2.7625e-02, 1.6058e-02,
5.7937e-02, -6.6464e-02, 1.1096e-02,
7.8268e-02, 8.6122e-02, 2.9298e-02,
6.4696e-02, 2.0285e-01, 4.3660e-02,
1.5339e-01, -3.7650e-02, 7.1438e-03,
-8.9058e-40, -3.6429e-39, -4.7562e-39,
8.3914e-40, -2.8054e-39, -3.6702e-39,
4.3666e-39, -1.0602e-39, -3.0369e-39,
7.2731e-02, -1.0227e-01, -1.9583e-02,
-1.7466e-02, -2.0097e-01, 9.3108e-02,
6.5196e-02, -1.1880e-01, -3.5152e-03,
-5.6533e-02, 6.2109e-02, 5.2029e-02,
5.7971e-02, 5.1577e-02, 6.6318e-02,
-2.1669e-03, 7.7274e-02, -4.0609e-02,
2.8531e-02, -8.3960e-02, 1.3615e-02,
-1.1151e-02, -1.4162e-03, 5.6661e-02,
-8.0954e-02, -1.0600e-01, 4.3276e-02,
7.6762e-04, 3.1437e-02, -6.1084e-02,
-8.1119e-02, 2.1406e-01, 6.0836e-02,
4.8105e-02, -1.6263e-01, 9.2555e-03,
1.1060e-01, -2.1090e-01, 1.6435e-01,
-1.0248e-01, -1.1884e-01, -7.9929e-02,
5.9980e-02, 1.0271e-01, -1.1891e-02,
-7.5044e-02, -2.3655e-02, -5.2865e-02,
2.1542e-02, 2.7305e-04, 1.3508e-01,
-1.2317e-02, 9.0742e-02, -3.0079e-03,
-9.9020e-02, 1.5578e-01, -2.1482e-03,
-8.9029e-02, 1.8470e-01, 3.7571e-02,
-2.0394e-01, -1.3735e-01, 2.9648e-02,
-4.3016e-40, -7.3591e-40, -7.3773e-40,
-4.1239e-40, -8.6029e-41, -6.9504e-42,
-7.5082e-40, 1.2975e-40, 2.1462e-40,
-1.8967e-02, -1.4903e-01, 8.1452e-02,
1.2099e-01, -2.5524e-02, 1.3285e-02,
-1.3780e-01, -5.3359e-02, -3.1310e-02,
-1.8984e-02, 4.1962e-02, 1.0186e-01,
-1.0823e-01, 1.1079e-01, 7.8613e-02,
-1.4521e-01, -7.7509e-02, 1.8768e-02,
5.0613e-03, -3.0459e-02, -6.3055e-02,
4.4540e-02, 2.0135e-01, 9.6351e-02,
-1.9495e-02, -1.2314e-01, 1.1720e-02,
2.1739e-02, 5.2098e-02, -4.0453e-02,
-9.9983e-02, 4.7578e-02, -2.7862e-02,
-8.6565e-02, 1.5241e-01, -4.0462e-02,
4.0458e-02, -1.2871e-01, -4.3491e-02,
9.8981e-02, -1.3637e-01, 2.0092e-02,
1.5626e-01, -8.4550e-04, -2.5701e-02,
1.8511e-02, -1.0257e-01, -7.3238e-02,
-3.9802e-02, -1.6120e-02, -7.4068e-04,
-1.1377e-02, 9.7975e-03, -9.0342e-02,
-6.7152e-02, 1.0208e-01, 2.5234e-02,
-4.3687e-02, 2.5334e-01, 9.2712e-02,
3.7702e-01, 4.1450e-02, 1.9934e-02,
-5.4201e-39, -6.7158e-39, -7.5025e-39,
-5.2548e-39, -6.4829e-39, -7.2782e-39,
-4.9999e-39, -5.9599e-39, -6.0469e-39,
3.5890e-02, -7.3738e-02, 9.8899e-02,
3.3312e-02, 5.8231e-02, -2.1348e-01,
8.6289e-02, 5.0837e-02, -6.5613e-02,
7.0208e-02, 4.1424e-02, -6.0761e-02,
4.4654e-02, -3.3590e-02, -5.3044e-02,
1.2319e-01, -4.4666e-02, -8.8193e-02,
-9.0463e-02, -3.0083e-02, 6.8075e-02,
4.2531e-02, 4.3248e-01, 1.3480e-01,
9.2389e-02, 1.3683e-01, -2.6092e-01,
2.8925e-02, 2.3317e-01, 7.8128e-02,
6.3444e-02, 1.6291e-01, -3.8727e-03,
6.9107e-02, 6.8477e-03, 3.9528e-01,
3.8471e-02, 3.0745e-02, 2.8446e-02,
1.0625e-02, -2.4006e-01, -1.2490e-01,
-1.3002e-01, 2.0025e-01, 4.7618e-02,
-3.9705e-02, -1.2017e-02, -9.8790e-02,
-1.2798e-02, -2.7540e-01, -1.5138e-01,
-1.0290e-01, 5.0112e-02, -1.7391e-01,
-9.7079e-02, -2.2350e-03, -5.9211e-02,
-2.4728e-01, 4.3353e-01, -1.9306e-01,
-1.8039e-01, 1.2689e-01, 5.2103e-02,
-4.5547e-39, -7.8040e-39, 4.1196e-39,
1.5214e-39, 9.3494e-40, -3.9058e-39,
7.8718e-39, 7.1728e-39, 5.3609e-39
}
,
{
-9.4505e-02, -7.0477e-02, -1.5792e-04,
-2.3475e-01, 5.8849e-02, -6.8161e-02,
7.0658e-03, -1.0276e-01, 7.2471e-02,
-7.3820e-03, -3.0740e-02, -1.1131e-01,
2.8429e-02, -3.5750e-01, -8.4683e-02,
-5.0210e-02, -3.1096e-03, -2.3730e-02,
4.5756e-02, -3.6724e-01, -7.6317e-02,
3.8467e-01, 5.5354e-02, 1.6943e-01,
-4.9403e-02, 7.4709e-02, -3.0550e-02,
-7.5324e-03, -1.6910e-01, -1.6103e-01,
4.6314e-02, 1.2912e-01, -3.0488e-02,
2.6388e-02, 5.6925e-02, 6.4396e-02,
3.7748e-03, -2.1310e-02, 1.1410e-01,
-7.0164e-03, 1.8228e-02, -2.5920e-01,
6.8416e-02, 1.3998e-01, 1.3290e-01,
-3.8861e-02, 8.9898e-02, -3.6631e-03,
3.5528e-02, 1.1249e-01, 3.7018e-02,
-6.2334e-02, -4.8470e-02, -4.4094e-02,
3.1574e-02, -1.2162e-01, 1.9669e-01,
-4.6605e-03, 1.1887e-02, -1.1958e-01,
-1.0736e-01, 6.0131e-02, -1.2829e-02,
2.1305e-01, -8.4750e-02, -2.7028e-02,
-3.0351e-01, -6.4246e-03, -7.9128e-02,
1.3081e-01, 9.5878e-02, 1.6193e-02,
-5.8335e-02, -5.5968e-02, -2.6284e-03,
-7.2218e-02, -1.1661e-02, 1.9413e-03,
-1.6043e-01, 1.1388e-01, -3.6473e-02,
-2.4077e-02, 1.2210e-01, 1.5531e-02,
1.5074e-01, -4.5545e-01, 6.1004e-02,
-6.3948e-02, 3.9804e-02, -4.8822e-04,
1.3135e-01, 9.2392e-02, 8.8914e-02,
1.2941e-01, -3.6052e-01, 3.9571e-02,
-2.4838e-02, 7.0425e-02, -1.9016e-02,
2.7629e-02, -7.0648e-02, -2.6838e-02,
-2.1844e-02, -9.6184e-02, -3.3611e-02,
8.5938e-02, 5.2663e-02, 2.2938e-02,
-6.9909e-03, -3.9627e-03, -6.5162e-02,
-4.9296e-03, -4.0383e-02, 6.7670e-01,
1.5251e-02, 2.1000e-01, -1.9137e-01,
2.2825e-02, 1.6640e-02, 3.8147e-02,
7.1902e-02, -4.9821e-02, -6.5592e-03,
1.5826e-02, 2.1626e-02, 1.1646e-02,
1.5180e-02, 1.5664e-01, 9.8696e-03,
-7.2901e-02, -2.1818e-01, 9.2465e-02,
6.4349e-02, 6.0290e-02, -2.1094e-02,
2.0633e-02, 4.8808e-02, 1.4080e-02,
4.8083e-02, -1.5979e-01, -5.3634e-02,
6.5004e-02, 7.0317e-02, 1.9117e-02,
-4.3048e-02, 5.9627e-02, -1.5068e-02,
1.8861e-01, -2.6868e-01, 1.2789e-03,
1.1273e-01, -2.7796e-01, 4.9841e-02,
4.9008e-03, 1.8241e-02, 4.3449e-02,
2.1420e-02, -1.0299e-01, -1.6235e-01,
-1.9300e-02, -1.5121e-02, 2.0616e-03,
-2.7591e-01, 3.9622e-02, -5.0492e-02,
1.1866e-01, 5.5502e-01, -2.3622e-02,
-6.1204e-03, -7.4778e-03, 6.7961e-03,
2.4215e-02, 2.1643e-03, 1.1442e-01,
7.5326e-02, 1.4455e-01, 8.0497e-02,
6.6115e-02, 2.9762e-02, 2.8680e-02,
3.7784e-03, -2.2769e-02, 2.4529e-02,
-1.1441e-02, 9.8463e-02, -1.2761e-02,
1.0642e-02, 5.2871e-02, 1.9650e-01,
-2.2225e-02, 3.1504e-02, 8.5645e-03,
4.9125e-02, 1.4439e-01, 8.4573e-02,
1.0103e-02, 1.9097e-02, 4.5579e-03,
-2.5773e-02, -4.0984e-02, -1.5402e-01,
5.3050e-02, 1.5509e-01, -1.9040e-01,
3.7700e-02, 1.0632e-01, -2.2520e-02,
-5.6582e-02, -4.6040e-02, -5.7562e-03,
-3.4924e-01, 3.2933e-01, 5.5211e-02,
2.3230e-02, 8.5108e-02, 3.7448e-02,
1.4266e-02, -7.2016e-02, 4.5252e-03,
-7.0246e-02, 3.9142e-01, -1.9216e-02,
2.0536e-01, -3.5615e-01, 3.8009e-02,
1.2252e-02, -5.7966e-02, 9.2672e-02,
2.4225e-02, -1.0186e-01, -1.4219e-01,
-2.8815e-02, 1.3088e-02, -2.6031e-03,
-6.2341e-02, -1.1216e-01, -7.2122e-02,
1.1812e-01, 4.3493e-01, 4.3593e-02,
-1.3524e-02, 4.8679e-03, -1.0598e-02,
3.4904e-02, 5.5813e-02, 4.6811e-02,
8.0928e-02, 7.6607e-02, 6.3968e-02,
5.4647e-02, 2.8693e-02, 2.1957e-02,
-8.2725e-03, 5.4668e-02, -3.0533e-02,
-9.3953e-03, 1.5874e-01, -3.6093e-01,
5.6412e-03, 1.8977e-02, 2.0088e-01,
-1.9414e-02, 1.9088e-02, 1.4504e-02,
5.8462e-02, 6.2645e-02, 4.9884e-02,
6.6913e-03, 4.3639e-02, 1.5139e-02,
-2.1897e-02, -1.1436e-01, -5.0838e-02,
7.1176e-02, 8.4667e-02, -1.4480e-01,
3.7676e-02, 1.0840e-01, -2.6417e-02,
-4.7584e-02, -4.0524e-02, 6.3032e-03,
-2.4822e-01, 2.4635e-01, 5.5942e-03,
-1.3347e-02, 1.0515e-01, 4.2549e-02,
-1.2380e-01, 4.1074e-02, 1.2608e-02,
-1.2042e-01, 2.9516e-01, 2.8380e-03,
5.1930e-01, -1.6498e-01, 5.7152e-02,
-6.5519e-02, 1.1001e-01, 2.8943e-02,
1.0854e-01, -6.0107e-02, -1.6730e-01,
-4.4417e-02, 3.4347e-02, -3.3756e-02,
2.0694e-01, 3.3047e-01, -9.4497e-02,
-2.1977e-01, 4.6614e-02, 1.2201e-01,
-2.9541e-02, 1.8900e-01, -1.8391e-01,
2.0064e-02, -3.2480e-02, -8.9041e-03,
-5.6385e-02, -6.4531e-02, 1.2879e-02,
-3.2499e-02, 1.0883e-02, 7.3564e-03,
1.9828e-02, -2.3278e-01, -4.3789e-03,
9.7669e-02, 1.3008e-01, -1.0405e-01,
2.2618e-02, -2.5495e-01, -1.0718e-01,
4.3524e-02, -7.3127e-02, 8.2424e-02,
-5.0193e-02, 4.0634e-03, 4.0696e-02,
2.7419e-02, 1.8353e-01, 9.2117e-02,
-7.4918e-02, 1.0602e-01, -3.4752e-02,
-1.3331e-01, -2.9583e-02, -5.2197e-03,
-3.7852e-02, 1.5998e-01, 1.5078e-03,
-5.6512e-02, 1.3378e-01, 1.4512e-02,
4.5255e-02, 2.4702e-01, -2.4848e-02,
-1.7526e-01, 1.5532e-01, 8.6686e-02,
3.1486e-02, -2.3247e-02, 9.7320e-03,
-5.2106e-01, 4.7937e-02, 4.1614e-02,
5.5436e-02, -2.0432e-01, 1.2444e-02,
-5.6792e-02, -5.5632e-02, 5.7612e-02,
-6.0248e-04, 4.9770e-02, -6.7956e-02,
1.3389e-02, -9.4141e-03, -7.3497e-03,
-4.6361e-01, 2.7450e-01, -8.2210e-02,
-2.6737e-01, -6.6114e-02, 6.3568e-02,
1.6910e-02, 1.4456e-01, -9.0081e-02,
8.8278e-03, 2.1776e-02, 8.7710e-03,
-2.3378e-02, -4.3907e-02, -3.6751e-02,
-2.4694e-03, -6.0419e-03, 3.0840e-02,
-1.6968e-02, -8.2266e-02, -1.0049e-01,
3.4429e-02, 1.0960e-01, 3.8355e-01,
-4.0301e-04, -3.1089e-02, -2.1373e-02,
-2.4172e-02, 4.6432e-02, 8.0742e-03,
-2.3134e-02, 1.7789e-02, 2.7136e-02,
3.0729e-02, 6.9008e-03, 1.2822e-02,
3.5043e-02, -6.1749e-02, -1.2565e-02,
-1.0354e-02, -2.6515e-03, 4.5632e-03,
-5.9818e-02, -9.7686e-04, -6.6467e-03,
-5.0833e-01, 1.8474e-02, 1.3598e-02,
3.6287e-01, 1.3698e-01, -1.2806e-02,
-2.8618e-02, -2.9128e-02, 2.9855e-02,
8.1243e-02, 4.7414e-02, -4.7434e-02,
-3.3738e-02, -3.4926e-01, 1.7786e-02,
1.0056e-01, -5.7937e-02, -1.8308e-02,
1.8214e-02, -1.9519e-01, 2.2152e-02,
-7.3543e-02, 2.0786e-01, -5.8196e-02,
3.9396e-02, -4.5349e-02, 1.5748e-02,
-5.4604e-03, 4.5777e-01, 1.7295e-01,
-2.0570e-01, -3.0970e-01, -1.9075e-01,
7.6751e-02, -1.3099e-01, 6.1278e-02,
6.0222e-02, 5.4418e-02, 1.2259e-01,
3.2160e-02, 8.5146e-03, 3.4578e-02,
-5.4391e-02, -2.5285e-02, 1.0251e-02,
-3.2763e-02, 7.9163e-02, -7.5136e-02,
1.8545e-02, -2.1972e-02, 1.3887e+00,
-1.2402e-03, -2.5679e-01, 7.2392e-02,
4.9692e-03, 1.7034e-02, 4.7043e-02,
1.2093e-02, -3.1230e-02, -8.2613e-03,
-7.8701e-03, -2.3516e-03, -7.2487e-04,
6.8495e-02, -5.2837e-02, -2.2482e-01,
1.3259e-02, 4.8009e-01, -4.0940e-02,
-4.1547e-02, -2.8753e-02, -5.2579e-03,
-1.7152e-01, -3.3676e-02, 1.5080e-02,
8.6014e-02, 7.9239e-02, 4.2196e-02,
-9.2870e-02, -1.5913e-02, -6.5804e-03,
4.0364e-02, 2.4914e-02, -1.4638e-02,
8.8705e-03, 2.8037e-01, 3.9890e-02,
1.1638e-01, 2.9467e-01, -4.3518e-03,
7.1091e-02, -2.2378e-01, 4.7315e-02,
3.8006e-02, -2.0246e-01, -3.8679e-02,
-5.8004e-02, 5.8991e-02, -6.2149e-03,
-1.3034e-01, 1.5540e-01, -5.2558e-02,
8.1594e-02, 3.5570e-01, 2.1220e-02,
1.4977e-02, 2.4493e-03, -4.0627e-02,
1.1402e-01, 6.6962e-02, 1.1150e-01,
1.1824e-01, 1.1492e-01, 1.1219e-01,
6.6067e-02, 6.9639e-02, -8.1836e-02,
-2.7144e-02, 1.4677e-01, -5.9261e-02,
4.4573e-03, 2.6235e-01, -7.4379e-01,
-8.3569e-03, 9.4465e-02, -6.5653e-03,
2.1095e-02, -1.8853e-02, 6.7972e-02,
1.2957e-01, 3.0122e-02, -1.0061e-02,
-3.4832e-02, 8.5404e-02, 5.7663e-02,
-5.0400e-02, -1.2050e-01, -2.3344e-01,
1.4977e-01, 7.8806e-02, 6.0771e-03,
5.6483e-02, 6.3927e-02, -5.8376e-03,
-2.8124e-01, 5.2581e-02, -1.3918e-04,
-1.4341e-01, 3.6558e-01, 4.7332e-02,
-3.9089e-02, 8.4188e-02, 2.7058e-02
}
};
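// HDNL2 per-layer biases: one 8-channel bias vector for each of the
// 8 layers above, indexed [layer][channel].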
static __device__ __constant__ const float HDNL2biasL[8][8] =
{
{
7.2678e-02, 8.5350e-03, 5.0400e-02, 2.6268e-02, 6.2434e-02, 1.0483e-01, -7.1650e-39, 1.0062e-01
}
,
{
-4.9844e-39, -1.8567e-39, 6.0627e-04, -1.9234e-38, 1.8331e-02, -1.1364e-01, -8.3962e-03, -1.7372e-04
}
,
{
-0.0091, -0.0055, 0.0237, 0.0093, -0.0479, 0.0188, -0.0034, 0.0399
}
,
{
6.5694e-03, -2.2259e-01, -1.1226e-02, -8.0327e-02, -1.0615e-36, 1.0402e-02, 7.6246e-03, -6.5940e-02
}
,
{
5.0711e-02, 7.1911e-02, 2.5293e-02, -1.5608e-02, 5.3835e-02, -1.6967e-38, 2.2243e-02, 3.2742e-02
}
,
{
1.5629e-02, 2.9703e-02, 2.6412e-02, 1.2301e-02, 1.8654e-01, -7.2260e-03, 2.4613e-02, -3.1853e-38
}
,
{
-0.0030, -0.0123, 0.0348, 0.0277, -0.0152, 0.0005, -0.0124, -0.0209
}
,
{
7.4856e-03, 7.2931e-04, 8.3015e-03, 6.4820e-03, 2.4008e-04, 7.0377e-06, 1.7948e-03, 8.9869e-03
}
};
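// HDNL2 output-layer (L10) weights, 4 x 8: 1x1 kernels mapping the
// 8 feature channels down to 4 output values, presumably one per pixel
// of a 2x2 sub-pixel upscale.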
static __device__ __constant__ const float HDNL2kernelsL10[4 * 8] =
{
0.4240, 0.4165,
0.1648, 0.1909,
-0.0985, -0.4455,
0.4639, -0.0533,
-0.1368, 0.4413,
0.2539, 0.3294,
0.2458, -0.3256,
-0.0479, 0.3200,
-0.3977, -0.0422,
-0.2736, 0.1053,
0.3902, 0.0594,
-0.0721, -0.2988,
0.0495, 0.1309,
-0.1703, 0.0033,
0.3061, 0.1827,
0.2443, -0.1259
};
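// HDNL3 first-layer kernels: eight 3x3 filters (9 weights each)
// applied to a single input channel.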
static __device__ __constant__ const float HDNL3kernelsL1[9 * 8] =
{
-0.0461, 0.1274, 0.2976,
-0.0393, -0.1251, 0.2527,
0.0791, 0.0600, -0.0303,
-0.0520, -0.5039, -0.3305,
-0.0115, 0.0456, 0.4370,
0.0601, 0.0780, 0.3106,
-0.0017, -0.0018, -0.0017,
-0.0017, -0.0018, -0.0018,
-0.0017, -0.0017, -0.0017,
0.2666, 0.1687, 0.2303,
-0.1901, 0.3825, 0.3024,
0.1811, 0.0581, 0.2080,
-0.1246, 0.0155, -0.4075,
0.1156, 0.5929, 0.1449,
-0.1080, -0.0171, -0.0516,
-0.0817, 0.2247, 0.0472,
0.0394, 0.1085, 0.1435,
-0.0480, -0.0135, -0.0606,
-0.0083, 0.2045, 0.1056,
-0.2239, 0.2823, -0.1926,
0.2581, 0.1362, -0.1914,
-0.0833, 0.0702, 0.0234,
0.3616, 0.3789, -0.1840,
0.0128, 0.1347, -0.0187
};
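// HDNL3 first-layer biases, one per output channel.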
static __device__ __constant__ const float HDNL3biasL1[8] =
{
-0.1329, -0.0431, -0.0031, -0.0129, 0.2294, -0.2595, -0.2370, -0.0499
};
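// --- Illustrative sketch (not part of the original pipeline) ---
// A minimal example of how the first HDNL3 layer could consume the
// HDNL3kernelsL1 / HDNL3biasL1 constants above: one thread per output
// pixel, a single-channel (e.g. luma) input, eight 3x3 filters, and a
// leaky-ReLU activation. The [filter][ky][kx] weight layout, the
// clamp-to-edge border policy, the planar output layout, and the 0.1
// activation slope are assumptions made for illustration only.
__global__ void HDNL3ConvL1Sketch(const float* in, float* out, int w, int h)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= w || y >= h) return;
    for (int oc = 0; oc < 8; ++oc)
    {
        float acc = HDNL3biasL1[oc];
        for (int ky = -1; ky <= 1; ++ky)
        {
            for (int kx = -1; kx <= 1; ++kx)
            {
                int sx = min(max(x + kx, 0), w - 1); // clamp-to-edge
                int sy = min(max(y + ky, 0), h - 1);
                acc += in[sy * w + sx]
                     * HDNL3kernelsL1[oc * 9 + (ky + 1) * 3 + (kx + 1)];
            }
        }
        // planar (channel-major) output, leaky ReLU with assumed slope 0.1
        out[(oc * h + y) * w + x] = acc > 0.0f ? acc : 0.1f * acc;
    }
}

// HDNL3 hidden-layer kernels: 8 layers, each an 8-in x 8-out bank of
// 3x3 filters (9 * 8 * 8 weights per layer). Note this bank is plain
// __device__ rather than __constant__, presumably because the combined
// weight tables would not fit in the 64 KB constant-memory budget.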
static __device__ const float HDNL3kernelsL[8][9 * 8 * 8] =
{
{
1.4090e-01, -1.8985e-02, -6.8589e-02,
6.6491e-02, 1.4360e-02, 8.5223e-02,
1.8782e-01, 9.8042e-02, -3.4558e-02,
2.5606e-01, 2.2027e-01, 2.7603e-01,
1.9424e-01, 3.4537e-02, 9.5975e-02,
1.1223e-02, -4.3377e-01, -1.4760e-01,
-3.4293e-40, -5.5421e-40, -4.4763e-41,
-6.3322e-40, -3.1495e-40, -7.8264e-41,
-1.5375e-40, -3.3656e-40, 5.2441e-40,
1.2413e-01, 1.5682e-01, 1.1465e-01,
1.6683e-02, 7.8382e-02, 1.0110e-01,
1.4902e-01, 1.3608e-01, 1.1674e-01,
-6.5160e-02, 7.7748e-02, 2.1773e-02,
2.0652e-02, 2.7245e-01, 1.0297e-01,
-2.0953e-02, 6.1685e-02, 4.4128e-02,
6.1538e-02, -1.9746e-02, -1.2785e-02,
2.5931e-02, 1.2740e-01, 9.0033e-02,
8.6448e-02, 2.0684e-01, 9.8063e-02,
-7.8384e-03, 6.3277e-02, 7.6751e-03,
3.5956e-02, 1.0555e-01, 4.2728e-02,
7.1578e-02, 1.3253e-01, 1.1171e-01,
-2.7538e-02, 1.5836e-01, 1.0014e-01,
-4.9113e-02, 1.6911e-01, 2.7329e-01,
7.9170e-03, 9.5440e-02, 1.3922e-01,
8.0151e-02, 4.3438e-02, 5.5314e-02,
3.4896e-02, 1.6816e-01, -4.5783e-03,
-1.4579e-03, 2.0493e-01, 2.6238e-02,
2.6499e-02, 3.9490e-01, -1.1582e-02,
3.5790e-01, 1.4317e-01, -2.1775e-01,
4.1794e-03, -3.2513e-01, -1.6729e-01,
3.4040e-41, -6.2960e-42, -1.0067e-40,
5.5978e-41, -1.2353e-40, -1.1347e-40,
5.4572e-40, -6.4384e-40, -4.1234e-40,
-9.3690e-02, 1.7765e-01, 1.1275e-01,
9.1159e-03, 1.7375e-01, 1.1427e-01,
-7.8385e-02, 1.5658e-01, -3.8399e-02,
-1.0756e-01, 5.9943e-02, -6.7273e-02,
-1.1117e-01, 1.5267e-01, 1.1563e-01,
-1.2964e-01, -3.8604e-02, -2.4532e-02,
1.6324e-02, 1.3112e-01, 6.1679e-03,
-7.7703e-03, 2.6311e-01, 8.9427e-02,
-2.8948e-02, 1.9341e-01, 4.4339e-02,
6.4559e-03, -6.8885e-02, 1.1481e-01,
-1.0665e-01, 3.8613e-02, 7.0410e-02,
-6.1680e-02, -1.7374e-02, 9.5475e-03,
-4.0081e-02, -3.1549e-02, 2.8311e-01,
-1.2178e-01, -1.3848e-01, 1.7416e-01,
-8.1756e-02, -1.7718e-01, 7.9533e-02,
-3.1299e-03, -3.2305e-03, -3.2094e-03,
-3.1548e-03, -3.2553e-03, -3.2453e-03,
-3.1459e-03, -3.2278e-03, -3.2076e-03,
-3.6554e-05, -3.6715e-05, -3.1284e-05,
-1.4927e-05, -1.4357e-05, -1.2185e-05,
-1.5771e-09, -1.1439e-09, -6.4952e-10,
3.7723e-40, 4.9166e-40, -2.1946e-40,
-4.7599e-40, -4.3356e-40, -8.3928e-41,
2.6127e-40, 4.8634e-40, 2.7720e-40,
-5.4972e-03, -5.6409e-03, -5.6919e-03,
-5.5818e-03, -5.7079e-03, -5.7542e-03,
-5.6338e-03, -5.7437e-03, -5.7600e-03,
-3.7940e-03, -3.8853e-03, -3.8693e-03,
-3.8995e-03, -3.9616e-03, -3.8945e-03,
-3.8438e-03, -3.9156e-03, -3.8269e-03,
-7.2342e-05, -7.8682e-05, -4.7701e-05,
-1.1126e-04, -1.1918e-04, -7.8931e-05,
-1.1644e-04, -1.2418e-04, -8.2350e-05,
-2.3881e-04, -3.7971e-04, -3.9448e-04,
-2.4112e-04, -3.8395e-04, -4.0189e-04,
-2.3451e-04, -3.7525e-04, -3.9222e-04,
-3.9853e-03, -4.0748e-03, -4.1134e-03,
-4.0685e-03, -4.1456e-03, -4.1548e-03,
-4.0547e-03, -4.1388e-03, -4.1357e-03,
5.3008e-02, 2.2252e-02, -7.1158e-02,
-6.6411e-02, -3.0015e-02, -2.2526e-02,
1.2259e-01, -6.2488e-02, 5.6190e-02,
1.5981e-02, -7.6832e-02, 1.7908e-02,
2.7618e-01, 5.4054e-02, 8.7282e-02,
1.5212e-02, -1.1097e-01, -2.2265e-02,
-6.8532e-41, -6.0539e-40, 4.6269e-40,
-2.9221e-40, -3.8468e-40, -4.6656e-40,
6.4572e-40, -6.1625e-40, 6.4545e-40,
3.5920e-02, 9.0955e-02, -1.7626e-02,
4.7826e-02, 1.8832e-01, -4.4043e-02,
-3.8405e-02, 5.9176e-02, 6.8182e-02,
3.7657e-03, 2.6441e-02, -2.5585e-01,
1.0969e-01, 2.3914e-01, 3.5120e-02,
-1.6252e-01, 3.4371e-02, -2.7501e-01,
4.9289e-02, 2.2088e-02, -1.4588e-02,
1.6384e-01, -8.1421e-03, -6.9613e-02,
1.0820e-01, 1.1137e-01, 7.2648e-03,
1.5243e-01, 1.3659e-01, 2.7553e-02,
1.3966e-01, 1.1019e-01, 1.9817e-02,
1.1420e-01, -5.1386e-03, 6.8617e-03,
-1.3264e-02, 2.1508e-01, 4.8430e-02,
5.1149e-02, 2.9165e-01, 2.8077e-01,
2.9288e-03, 9.0611e-02, 8.1538e-02,
-1.1812e-01, 1.5603e-02, 1.1571e-01,
-3.4958e-02, -1.6688e-03, -4.6619e-02,
-1.0417e-02, -3.1802e-02, 1.8357e-02,
1.1064e-01, 1.8397e-01, 4.8449e-02,
-8.3336e-03, 1.6029e-01, 3.9490e-02,
-4.0959e-01, -2.6134e-01, 2.0766e-02,
6.6073e-41, -6.7490e-40, -5.1131e-41,
-4.3320e-41, -3.7194e-40, 2.0674e-40,
-5.2359e-40, -3.4006e-40, -4.9257e-40,
-4.7260e-02, 2.8518e-03, -2.7764e-01,
6.9182e-03, 1.3938e-01, -1.3162e-01,
-6.0901e-03, 1.0339e-01, 6.0419e-02,
-1.4449e-01, -3.2043e-02, -9.1466e-02,
-1.4022e-02, 3.1703e-01, 5.8166e-02,
-1.5243e-02, 1.4521e-01, 2.0790e-04,
-1.0255e-01, -7.8766e-02, -1.2395e-01,
7.9894e-03, 3.7079e-03, -3.2134e-02,
1.1663e-01, 1.4808e-01, 2.0431e-01,
7.4026e-02, 6.9632e-02, 1.7156e-01,
-3.0385e-02, 2.3218e-01, 7.3855e-02,
-8.8530e-02, -5.9224e-02, 2.3431e-02,
1.4596e-02, 3.2442e-02, -1.1308e-01,
-6.3734e-02, 2.5270e-01, 7.8081e-02,
1.0468e-02, 1.5473e-01, 3.8676e-02,
-1.0842e-01, 8.6778e-03, 1.4985e-01,
8.1757e-03, -8.2109e-02, 8.5471e-02,
-2.1437e-01, -6.1173e-02, 4.8163e-02,
2.8965e-01, 1.9748e-01, 4.2651e-02,
1.8196e-01, 3.3932e-01, 3.9594e-01,
3.9657e-01, 4.2167e-01, 2.9290e-01,
7.4011e-41, 6.5220e-40, -5.9885e-40,
7.4011e-41, 6.2047e-40, -7.1533e-40,
4.1950e-40, -1.1886e-40, -5.9922e-40,
1.9662e-01, 2.1402e-01, 3.1041e-02,
-1.1079e-01, 1.3361e-01, -2.1608e-01,
-1.7962e-01, -8.0576e-02, -3.1277e-01,
1.0620e-02, 2.4024e-01, 1.0657e-01,
-7.9906e-05, 2.8760e-01, 4.1231e-02,
-1.3261e-02, -1.0868e-01, -1.1267e-01,
-1.0659e-02, -2.6051e-02, -4.5389e-02,
5.8261e-02, 4.0288e-02, 6.7050e-02,
-2.6462e-01, -1.7846e-01, -1.0002e-01,
-6.2904e-02, 1.5275e-01, 4.4282e-03,
1.4446e-01, 1.1814e-01, -8.0349e-02,
2.0331e-02, 3.3014e-02, 1.2710e-01,
1.6084e-01, 3.8819e-01, 1.0854e-01,
-6.8126e-03, 3.5673e-01, 1.8938e-01,
-1.1660e-01, -5.7694e-02, -2.9194e-01,
1.2775e-02, -3.2769e-02, 1.7228e-02,
1.8324e-01, 1.1983e-01, -1.6944e-02,
1.0593e-01, 1.3451e-01, 5.2536e-02,
1.9147e-01, 1.3875e-01, 1.0298e-01,
-2.0871e-01, -1.7197e-01, 1.1342e-01,
-1.7581e-01, 4.0972e-02, 2.9796e-01,
3.2588e-40, -4.3663e-40, -2.6518e-40,
3.2588e-40, -4.3663e-40, -2.6518e-40,
4.1600e-40, -4.4350e-40, -4.8744e-41,
3.7289e-02, 8.1769e-03, 1.7059e-02,
3.7735e-02, 6.6571e-02, -6.6137e-02,
-5.8890e-02, -7.7019e-03, -6.2128e-02,
-4.0751e-02, 1.1710e-01, -1.1586e-01,
-1.2999e-01, -1.6384e-02, -2.1858e-01,
-2.8028e-01, -6.0443e-02, -1.1880e-01,
1.8152e-01, 1.5364e-01, 1.1781e-01,
2.9010e-01, 2.4612e-01, 1.3170e-01,
1.9022e-01, 1.8117e-01, 1.6483e-01,
9.3342e-02, 2.6607e-01, 1.4679e-01,
1.6729e-01, 2.5374e-01, 1.1954e-01,
6.3258e-02, 1.0557e-01, 6.7221e-02,
-5.2017e-02, 1.9628e-01, 1.7243e-01,
-3.2667e-02, 1.5756e-01, 1.9347e-01,
-9.5252e-02, -3.7525e-02, -3.4543e-04,
-4.9759e-02, 4.0383e-02, -2.0231e-02,
-1.1776e-01, 3.4182e-02, 3.6720e-02,
-1.4822e-02, -4.1658e-02, -1.3729e-02,
-1.9215e-02, 2.4427e-02, -9.0638e-02,
-1.4438e-01, -2.1785e-01, -5.1789e-02,
-2.0279e-01, -3.3918e-01, -1.6871e-01,
6.1262e-41, 2.4066e-40, 6.6851e-40,
5.3430e-40, -3.2335e-40, -3.7400e-40,
-6.3256e-40, -4.7491e-40, 2.2854e-40,
-6.8701e-03, -1.4849e-02, 8.6332e-02,
1.1686e-01, 1.8346e-01, 1.8797e-01,
-2.3251e-02, 7.3973e-02, 1.0532e-01,
-6.1838e-02, 5.6667e-02, 8.1584e-02,
-3.8900e-02, 7.0927e-02, 9.5606e-02,
-4.5098e-02, -1.0829e-01, -1.2224e-01,
3.5047e-03, 3.2898e-02, 3.5622e-02,
1.6170e-02, 4.3721e-02, 9.7496e-02,
2.3445e-03, 6.0417e-02, 1.3482e-01,
6.0570e-02, -5.7139e-03, -1.0883e-03,
2.2701e-02, -2.9113e-02, 7.9178e-03,
8.1214e-02, -4.1408e-02, 1.3616e-02,
-4.7985e-02, 1.0304e-02, -3.3236e-02,
-1.6334e-02, -8.1538e-02, 1.8629e-02,
-9.3720e-02, -1.2920e-01, -4.0836e-02
}
,
{
1.0443e-01, 1.5461e-01, -1.4743e-01,
1.6716e-01, 1.0532e-01, -2.3088e-01,
1.0218e-01, 1.2393e-01, -9.6646e-02,
1.7659e-01, -7.3279e-02, 1.9627e-02,
1.7721e-01, -1.4329e-01, -1.2533e-01,
1.6551e-01, -3.4616e-01, 9.5618e-02,
4.5827e-09, 9.3413e-09, 1.7015e-08,
1.2245e-08, 9.9727e-09, 6.7108e-09,
1.9612e-07, 3.9479e-08, 1.1537e-09,
2.2127e-02, 9.2715e-02, -1.2150e-01,
7.5652e-02, 1.1548e-01, -1.2420e-01,
-1.0693e-03, -7.2839e-02, -1.9664e-01,
1.4466e-01, -1.8552e-03, -1.3575e-01,
2.0699e-01, 8.0396e-02, -1.9651e-01,
-4.7075e-02, -5.1259e-02, -8.2593e-02,
-2.2385e-01, 3.0066e-03, -2.2659e-02,
6.1827e-02, 2.5331e-02, -5.3898e-02,
2.7091e-01, 1.0991e-01, -3.3600e-01,
-8.9499e-02, -9.3821e-03, 2.2675e-02,
1.1213e-01, 1.3276e-01, 2.0368e-02,
6.5408e-02, 4.1598e-02, -4.7917e-02,
6.0740e-03, 1.2236e-04, -1.0659e-01,
-1.8072e-02, -9.1082e-02, -9.0414e-02,
4.9052e-02, -1.4298e-01, -3.9721e-02,
1.1840e-01, 2.2503e-01, 2.4587e-02,
9.3023e-02, 6.9650e-02, 1.6798e-01,
-1.5640e-03, 1.6300e-02, 6.3585e-02,
1.4431e-01, 3.7885e-02, 1.6692e-02,
1.7345e-01, 7.2315e-02, 1.8942e-02,
1.1081e-01, 8.2973e-02, -9.7717e-02,
-5.2264e-03, -5.2641e-03, -5.2727e-03,
-5.2809e-03, -5.3125e-03, -5.3153e-03,
-5.2915e-03, -5.3251e-03, -5.3231e-03,
6.0008e-02, 2.0268e-01, 1.3396e-01,
-2.5202e-03, -1.7750e-02, -1.2019e-02,
1.1806e-01, -2.2306e-02, 3.6464e-02,
7.9324e-02, 3.1883e-02, 1.5483e-02,
-4.3537e-02, 1.2204e-02, 1.8905e-02,
-8.1581e-02, -1.1307e-01, -6.0718e-02,
-2.4865e-01, -1.0199e-01, 1.9886e-02,
-1.0519e-02, 6.9972e-02, 4.8012e-02,
-1.5282e-02, 1.1979e-01, 8.7968e-02,
-3.6752e-02, 1.9523e-02, 7.1321e-02,
-5.8295e-02, 5.3242e-02, 1.2773e-01,
-7.9671e-02, 8.3249e-04, 7.4904e-02,
1.1792e-01, 2.2135e-03, -9.0963e-03,
-2.8356e-03, -4.2661e-02, 6.9497e-02,
9.3561e-02, 1.0475e-01, 5.4745e-02,
-8.5901e-02, -2.1969e-01, -1.5572e-01,
3.6473e-02, 1.1097e-01, -2.6830e-02,
1.2199e-02, 1.8917e-01, 1.1906e-01,
1.0664e-01, -2.7005e-01, 1.5492e-01,
-4.1771e-02, -1.6580e-01, 2.9234e-02,
-1.9854e-02, 2.1436e-01, -1.1100e-01,
4.5382e-04, 4.2085e-04, 5.6852e-04,
3.4951e-04, 3.7354e-04, 3.2786e-04,
2.0790e-04, 2.8606e-04, 3.2415e-04,
-1.5500e-02, 2.2865e-02, -3.0070e-01,
1.8467e-01, 2.4899e-01, 1.4812e-02,
-1.2318e-01, 2.3175e-01, 7.2244e-02,
1.6713e-01, 1.9089e-02, -2.7494e-01,
1.0202e-01, 2.9200e-01, -3.6055e-03,
1.3265e-01, 2.2551e-01, 1.9897e-01,
-3.9474e-02, 1.6262e-01, 1.6726e-01,
-8.6222e-02, 2.0573e-01, -7.3247e-01,
-9.5391e-02, 3.8933e-01, 1.5861e-01,
-1.2202e-01, -6.4735e-02, -1.1762e-01,
-2.2427e-02, -1.9171e-01, -1.6092e-01,
3.2356e-01, -2.2234e-01, -1.3743e-01,
-1.1493e-01, -2.4936e-02, 2.9212e-02,
-9.8112e-02, -1.8021e-02, -1.0507e-01,
-1.0168e-01, 1.1759e-01, -9.8203e-02,
-2.8871e-02, 1.3249e-01, 7.8378e-02,
-1.1012e-01, -4.0596e-02, 5.4202e-02,
4.9022e-02, -1.1744e-01, 9.8888e-02,
1.3343e-02, 1.4358e-01, -8.7142e-02,
1.9952e-01, 3.3708e-02, 2.0721e-02,
2.6527e-02, -2.3822e-01, 2.4706e-01,
-3.2750e-04, -2.8475e-04, -6.3494e-05,
-2.2378e-04, -1.8046e-04, -1.9242e-05,
-4.2124e-05, -2.2062e-05, 4.5500e-07,
1.1692e-01, 4.0366e-01, -1.8709e-02,
8.2700e-02, 1.7884e-01, -1.3520e-01,
3.7758e-02, 3.7048e-02, -2.8109e-01,
-2.3438e-01, 5.9423e-02, -1.7300e-01,
1.0343e-02, 7.2307e-02, -4.3852e-01,
-5.7429e-02, -4.9136e-02, -8.0327e-02,
8.1094e-02, 2.9118e-02, 1.6677e-01,
1.2155e-01, 6.5358e-01, 2.4544e-01,
3.1163e-02, 3.7463e-02, -2.6613e-01,
1.2723e-01, 1.2541e-01, 1.4319e-02,
1.9055e-01, -5.7441e-02, 1.1146e-01,
-1.0690e-02, -1.7567e-01, -1.2238e-01,
-2.0879e-01, -6.5278e-02, -7.9327e-02,
-1.6564e-01, -1.3659e-01, -2.6231e-01,
-3.1916e-01, -2.6553e-01, -9.8647e-02,
-1.0617e-01, 1.2782e-01, -2.1053e-02,
-1.2329e-01, 1.4952e-01, -1.7466e-02,
-1.6969e-01, 3.6980e-02, -6.7732e-02,
-3.1220e-02, 4.0615e-02, -1.5251e-01,
-2.0017e-01, 2.2421e-01, -2.5682e-02,
-6.5873e-02, 1.8346e-01, 1.2982e-02,
1.4021e-06, -1.6929e-05, -8.4696e-05,
1.9580e-05, 2.9943e-06, 3.0084e-06,
2.0769e-04, 1.4661e-05, 2.9503e-06,
-1.4485e-01, 1.8841e-01, -1.7954e-01,
2.1551e-01, 2.2601e-01, -8.6689e-03,
8.6926e-02, -6.8989e-02, -1.2683e-01,
-8.7712e-02, 6.3176e-02, 1.1983e-01,
1.0790e-01, 6.6418e-02, 6.5849e-02,
1.2483e-01, 1.2428e-01, 4.4994e-02,
1.5139e-01, -1.2116e-01, -3.5497e-01,
-6.1889e-02, 3.4088e-01, 1.3148e-01,
-1.6478e-01, 4.4477e-02, -1.1979e-01,
3.8343e-02, 1.7992e-01, 3.6790e-01,
3.0426e-01, 1.1235e-01, 4.9815e-01,
2.6290e-01, 1.9703e-01, 1.5881e-01,
-6.4678e-03, 2.4401e-01, 1.9266e-01,
-1.4089e-01, 1.2323e-01, 4.4340e-02,
-8.8856e-02, 8.4036e-02, -9.8488e-02,
-1.7377e-03, -1.7654e-03, -1.7223e-03,
-1.7651e-03, -1.7919e-03, -1.7491e-03,
-1.7172e-03, -1.7446e-03, -1.7041e-03,
-3.0384e-04, -2.9297e-04, -2.4838e-04,
-3.2961e-04, -3.1678e-04, -2.7009e-04,
-3.1665e-04, -3.0492e-04, -2.6122e-04,
3.7109e-40, -3.7915e-40, -5.2536e-40,
5.8286e-41, -5.6108e-40, 4.3331e-40,
-3.0184e-42, -4.8987e-40, -5.1788e-40,
-4.0457e-04, -4.3257e-04, -4.1616e-04,
-4.2268e-04, -4.5118e-04, -4.3407e-04,
-3.9446e-04, -4.2199e-04, -4.0650e-04,
-1.1253e-16, -1.1328e-14, -2.0489e-14,
-3.0346e-19, -1.7189e-16, -4.5141e-16,
-2.4957e-30, -1.8191e-23, -3.5882e-22,
-3.1610e-36, -1.7544e-24, -2.2187e-21,
-4.2887e-19, -1.5526e-15, -1.5160e-14,
-1.7750e-16, -6.8066e-14, -3.3764e-13,
-6.9570e-24, -5.1139e-23, -2.9335e-23,
-1.9091e-22, -1.0323e-21, -4.5931e-22,
-2.0010e-22, -9.3710e-22, -3.5622e-22,
-2.9470e-04, -2.9081e-04, -2.5958e-04,
-3.2290e-04, -3.1810e-04, -2.8461e-04,
-3.1795e-04, -3.1356e-04, -2.8121e-04,
6.1623e-02, 1.7057e-01, 8.0478e-02,
1.2624e-01, 1.8468e-01, 2.1901e-02,
7.6033e-02, 1.3455e-01, 8.4037e-02,
8.4434e-02, -1.7069e-02, -7.8318e-02,
4.9244e-02, 4.4782e-02, -6.9747e-02,
1.2915e-01, 1.1453e-01, -6.5243e-02,
-5.0985e-03, -5.1407e-03, -5.1687e-03,
-5.1185e-03, -5.1511e-03, -5.1712e-03,
-5.0986e-03, -5.1272e-03, -5.1409e-03,
-1.8186e-02, 6.2680e-02, 3.3235e-02,
1.3398e-02, 1.6497e-01, 4.3523e-02,
-2.4101e-02, 1.3316e-01, 1.8373e-02,
-6.2677e-04, 6.5026e-03, 2.5948e-02,
6.6542e-02, 1.2352e-01, 1.5155e-02,
-8.6237e-02, -2.0907e-02, 1.0237e-02,
-1.7807e-01, -8.6196e-02, -3.2408e-02,
-8.1946e-03, -1.3957e-02, -1.6733e-01,
2.6269e-02, 1.6817e-01, 9.4029e-02,
3.4005e-02, -1.2833e-02, -1.2038e-01,
-4.8950e-02, 3.9857e-02, 1.4048e-02,
-6.4758e-02, 9.9603e-02, 1.0748e-01,
-1.0850e-02, 9.8875e-02, -4.4439e-02,
9.1219e-02, 6.6400e-02, -6.7693e-02,
5.3318e-02, 1.1838e-02, -1.5164e-01,
-5.8568e-02, 1.1249e-01, -3.8286e-02,
-7.1122e-02, 9.5799e-02, 3.8521e-02,
-1.3846e-01, 1.4167e-01, -3.5500e-03,
-1.0343e-01, -3.3025e-02, 3.7186e-02,
-2.0769e-03, 1.3558e-01, -1.3009e-01,
1.0167e-02, 1.5358e-02, -9.8009e-02,
2.4123e-05, -1.1800e-05, -1.4180e-04,
3.5217e-05, -6.3838e-06, -1.2243e-04,
8.5525e-05, 2.1599e-06, -5.3290e-05,
-1.4471e-01, 2.0111e-02, -1.2449e-01,
5.3368e-02, 3.2918e-01, 1.4034e-01,
-1.1833e-01, -1.9225e-02, -1.2658e-01,
-2.6966e-01, 1.1751e-01, 9.7072e-02,
-1.9929e-01, 9.7986e-02, -5.1240e-02,
-9.5073e-02, -6.8070e-02, -2.1318e-01,
9.5305e-02, -4.0551e-02, -1.0936e-01,
5.2687e-02, 4.5340e-01, 2.3531e-01,
-1.3385e-02, 1.5922e-01, -1.8371e-01,
-1.2203e-01, -7.2567e-02, -3.0000e-01,
-3.4356e-02, -1.3471e-01, -9.0995e-02,
-2.5230e-01, -2.4846e-01, -1.8529e-01,
-1.6962e-01, 1.0905e-01, 1.1557e-01,
-1.4405e-01, 8.9191e-02, 1.1715e-01,
-1.3237e-01, 5.2092e-02, -1.2227e-01
}
,
{
2.0013e-01, 2.2105e-01, 1.9196e-01,
6.8158e-02, 1.7154e-01, -8.6677e-02,
9.2652e-02, 1.0789e-01, 1.6745e-01,
-2.9254e-01, -7.6815e-02, 5.8812e-02,
-4.6466e-02, 1.3941e-02, 2.3353e-01,
-1.5033e-01, 7.5167e-02, 1.4433e-01,
2.8008e-02, 3.1625e-01, 3.2877e-02,
-5.8835e-02, -1.7305e-01, -6.1558e-02,
-1.2227e-01, 3.9931e-02, 3.0300e-02,
2.3004e-01, 4.1834e-02, -5.7790e-02,
-2.2861e-01, 2.9314e-01, 1.6884e-01,
-2.8009e-02, 4.7550e-02, -4.4542e-02,
-2.4674e-01, -1.5483e-01, 3.2653e-02,
-2.1574e-01, 3.1083e-01, -1.4025e-03,
1.7354e-02, 5.6417e-02, 1.0844e-01,
-4.2681e-40, 4.5893e-42, -7.4234e-40,
1.7665e-40, 4.0151e-40, 4.6269e-40,
2.5452e-40, -7.0179e-40, -1.2338e-40,
-1.4957e-01, -1.9087e-02, 7.1170e-02,
-1.4435e-01, 8.9560e-02, 1.3879e-01,
-3.6992e-02, 5.9822e-02, 1.9241e-02,
-2.4402e-03, 1.5097e-01, 6.3958e-02,
-1.7630e-01, 3.6009e-01, -2.0383e-01,
-8.5106e-03, 4.0863e-03, -2.7575e-02,
7.8942e-02, -1.8640e-01, -6.7715e-02,
7.2777e-02, -1.3804e-01, -7.0332e-02,
1.5185e-01, -4.3530e-02, 1.4502e-01,
-3.2928e-02, -3.0583e-02, 9.2061e-02,
1.2493e-01, 1.0400e-01, 1.3780e-01,
1.4438e-01, 8.2051e-02, 1.6159e-02,
2.7478e-02, 1.7768e-01, 2.5945e-01,
-3.4662e-01, 2.0330e-03, 8.8118e-02,
-2.9628e-01, -1.3212e-01, -1.8145e-02,
-1.9330e-01, 3.9238e-02, -4.6944e-02,
-1.5668e-01, -5.7104e-02, 1.9558e-01,
6.5305e-02, 5.9933e-02, 7.7337e-02,
-2.4906e-02, -1.1235e-01, 1.3822e-02,
-3.9988e-02, -9.1882e-03, 1.9204e-02,
1.0504e-01, 4.6820e-03, -2.1836e-02,
-2.6953e-40, 2.5334e-40, -1.3028e-40,
1.4110e-41, 5.6841e-40, 3.6368e-40,
-1.1746e-41, -7.0658e-41, -3.9413e-40,
1.5025e-02, 7.4419e-02, 9.5652e-02,
5.0297e-02, 6.6704e-02, 5.7316e-02,
2.5102e-02, 1.1985e-01, 2.6043e-02,
3.3297e-02, -7.7374e-02, -1.1114e-01,
-7.5586e-02, -1.9338e-02, -1.3739e-02,
4.5616e-02, -6.4946e-02, -6.9372e-02,
-7.5874e-03, -1.1141e-01, -2.9135e-02,
-6.9436e-03, -1.4418e-02, 1.6436e-03,
-1.3051e-01, -1.3324e-01, -9.3934e-02,
1.2184e-01, 1.9386e-01, 1.7995e-01,
-2.7452e-02, 9.9736e-02, 1.0020e-01,
-6.3290e-02, -2.1447e-02, -1.7005e-01,
1.3857e-01, 2.3338e-01, 2.5410e-01,
2.3002e-01, 1.9551e-01, 1.4452e-01,
4.7040e-01, 2.2647e-01, 1.5215e-01,
2.6927e-02, -2.1304e-01, -1.4762e-01,
-5.6998e-02, 2.9064e-01, 1.8085e-01,
8.9393e-02, -1.7463e-01, -2.7095e-01,
3.8434e-02, 1.7198e-01, -1.8122e-02,
-1.3857e-01, 1.9418e-01, 1.5019e-01,
-5.6337e-02, -5.3265e-01, 3.2122e-01,
-2.4484e-40, -5.3707e-40, 1.5854e-41,
5.1791e-40, -4.1875e-41, 5.6732e-40,
1.3048e-40, 1.6452e-40, -4.5028e-40,
-3.0692e-02, 1.8569e-01, 2.0327e-01,
-7.4756e-02, -5.1765e-02, 4.2475e-02,
-9.0675e-02, -3.0438e-01, -3.5088e-01,
-1.9129e-02, -1.5663e-03, 4.9895e-02,
-1.9441e-02, 9.3237e-02, 1.2910e-01,
-2.3919e-02, -4.0539e-01, 2.8167e-02,
2.0203e-01, 3.3424e-02, 1.7927e-02,
4.1923e-02, -1.6967e-01, 2.5656e-02,
-1.5869e-01, -1.8727e-01, 2.7860e-03,
-4.0276e-02, -6.7792e-03, 3.3699e-02,
-6.7044e-03, 1.7686e-02, 2.9786e-02,
-1.5623e-02, 3.7904e-02, 2.4737e-02,
-1.2282e-01, -3.6563e-02, 4.1976e-02,
-9.9622e-03, 8.8981e-02, 2.1364e-02,
-8.5668e-02, -1.6803e-01, -4.4974e-02,
1.3164e-01, 4.1294e-01, 1.8897e-01,
2.1991e-01, 1.6247e-02, 1.1569e-01,
-3.0142e-02, 1.4069e-02, 3.6646e-02,
-2.6816e-02, -3.9767e-02, 1.4061e-01,
-1.3603e-01, -2.0649e-01, 7.5837e-02,
-1.6984e-02, -8.3800e-03, 2.3652e-04,
1.5049e-40, 4.6504e-40, 1.3625e-40,
-7.5358e-40, -3.4257e-40, 9.9763e-41,
4.7243e-40, 7.4890e-40, -7.9440e-42,
-5.9692e-02, -2.8047e-02, 2.3795e-02,
-3.5284e-02, 1.1448e-02, 5.0302e-04,
-3.5066e-02, 4.6185e-02, 1.2167e-02,
3.7583e-02, -3.6598e-02, 1.0206e-01,
-9.6229e-02, -1.5977e-01, 4.9157e-02,
3.7293e-02, 5.8766e-02, 1.0448e-02,
1.1490e-01, 1.4459e-01, 8.6936e-02,
2.8609e-01, -4.8108e-02, 9.0023e-02,
6.7941e-02, -5.7148e-03, 1.0021e-01,
7.3816e-02, 7.3794e-02, 8.0970e-03,
2.8307e-02, 3.6635e-03, -1.1769e-01,
4.1374e-02, 3.9933e-02, -4.4292e-02,
5.9423e-02, 1.9009e-01, -2.3735e-01,
-2.6670e-01, 5.8789e-01, -2.0048e-01,
-3.7082e-01, 1.8045e-01, 5.4820e-02,
-6.3567e-01, 2.0098e-01, 1.0653e-01,
-2.5056e-01, 6.5065e-01, -4.0471e-01,
5.4715e-02, 2.4375e-01, -2.7402e-01,
1.5982e-01, 1.0923e-01, 2.1566e-01,
2.0239e-01, -9.0221e-02, -4.4606e-01,
1.0550e-01, 5.4666e-02, -2.7134e-01,
-4.6424e-40, 2.9137e-40, 7.4968e-41,
1.2376e-41, -5.6213e-40, -6.3457e-40,
2.5404e-40, 2.0013e-40, 3.5611e-40,
5.5423e-02, 3.9843e-02, -1.7509e-01,
5.4480e-02, 5.0331e-02, -1.6793e-01,
6.6093e-02, 3.0163e-02, -8.2023e-02,
-1.5490e-01, 1.7457e-01, 2.7832e-01,
1.1482e-01, 2.5759e-01, -2.4199e-01,
-9.3891e-02, 9.1921e-02, -6.4480e-03,
1.9266e-01, 5.2907e-02, 7.0289e-02,
1.3582e-01, 6.4246e-02, 1.4989e-01,
6.2013e-03, -6.8884e-02, 6.8734e-02,
-1.0483e-01, -7.7134e-02, -3.6204e-02,
1.7590e-02, 5.0844e-02, 1.4234e-01,
7.2913e-02, 6.0726e-02, 6.4414e-02,
-8.5021e-02, -1.0621e-03, 5.5851e-02,
2.4666e-01, 6.5652e-02, -1.8180e-02,
1.5225e-01, 1.2928e-01, 3.1578e-03,
1.1468e-01, 1.9544e-01, 6.6637e-02,
6.3430e-02, 2.0542e-01, 7.0876e-02,
3.4779e-02, 1.0037e-02, -2.2134e-02,
-6.9304e-02, 1.1184e-01, -3.7015e-02,
-1.7634e-01, 1.2475e-01, 9.1947e-02,
-6.0550e-02, -1.3904e-01, 7.5192e-02,
-2.2871e-40, 4.7367e-41, -1.0711e-40,
-2.8662e-40, 4.0542e-41, 3.3067e-40,
-4.4395e-41, -7.2684e-41, 1.8695e-40,
-1.6702e-01, -2.6654e-01, 8.7902e-03,
-2.0108e-01, -3.8093e-01, -8.3700e-02,
-7.5433e-02, -2.0689e-01, 2.7951e-02,
2.9938e-03, 1.1378e-01, 7.1598e-02,
-1.6031e-01, 1.3475e-01, 1.5800e-01,
-7.2019e-02, -1.1663e-01, 8.0692e-02,
1.0610e-01, 1.1163e-02, -1.4959e-01,
-1.1576e-01, -8.5645e-02, 4.0414e-02,
5.6245e-02, 1.7056e-01, 2.5734e-01,
-6.1086e-02, -7.0851e-02, 7.6851e-02,
-2.7595e-02, -6.0890e-02, 4.7472e-02,
7.1059e-03, 6.0942e-05, 7.4915e-02,
1.9350e-01, -1.8458e-02, -2.3040e-02,
6.3477e-02, 1.1923e-01, 9.9319e-02,
6.4839e-02, 2.7973e-01, 1.2902e-01,
-1.7829e-01, 5.7083e-03, -6.1680e-03,
-1.1256e-01, -2.7951e-02, -2.1544e-01,
-2.1614e-02, -7.1468e-02, -2.2054e-02,
-8.7543e-02, -1.2982e-01, 1.9386e-01,
-5.7157e-03, -1.0108e-01, 1.4467e-01,
-6.5742e-02, -7.2054e-02, 1.7924e-01,
7.5418e-40, 6.3043e-40, 4.9815e-40,
-1.0952e-40, 3.0327e-40, -2.3848e-40,
4.1302e-40, 2.0150e-40, -1.6509e-40,
-1.3985e-02, -1.0550e-01, 5.8772e-02,
-1.7108e-02, -7.3644e-02, 3.3014e-02,
-1.8224e-03, 2.8931e-03, 9.2762e-02,
4.1531e-02, -1.5139e-01, -1.7773e-01,
9.6548e-02, -1.1914e-01, -4.6536e-02,
8.6754e-02, -4.0057e-03, 1.8983e-01,
1.6545e-01, -4.7311e-02, -7.2455e-03,
3.7567e-01, 1.8883e-01, -7.4325e-02,
-5.8252e-02, -1.3811e-02, -7.0470e-02,
-3.2943e-02, -7.0770e-02, -1.4700e-01,
1.7043e-02, 9.4331e-02, 4.2857e-03,
4.1247e-03, 1.6690e-01, 4.2146e-02,
1.1420e-01, -7.4456e-02, -3.8763e-02,
1.6807e-01, 9.3636e-03, -1.1796e-01,
1.7703e-01, 1.1386e-03, -6.8707e-02,
1.0259e-01, -1.8918e-02, 6.5902e-03,
1.2421e-02, -7.8960e-02, 2.1766e-02,
1.3062e-01, 4.6001e-02, 2.4199e-01,
-1.2955e-02, -1.9329e-01, 5.2074e-03,
5.9446e-02, 1.8832e-01, 2.2094e-01,
-1.0954e-01, -8.1867e-02, -4.3324e-02,
-3.9596e-41, 2.8677e-40, -6.5843e-40,
4.2812e-41, -3.5323e-40, 4.8298e-40,
7.6351e-40, -2.4759e-40, 7.3030e-40,
-1.1284e-01, -8.4171e-02, -1.5935e-01,
-3.2299e-02, 1.5427e-01, 8.9029e-02,
-3.8815e-02, 1.3098e-01, -4.3065e-02,
-2.5276e-01, -1.7018e-01, 9.7901e-02,
1.4218e-01, 3.1236e-01, 2.9636e-01,
-2.3613e-02, -5.5258e-02, -2.0550e-01
}
,
{
0.0333, 0.1145, -0.0922,
0.1185, 0.4533, -0.2015,
-0.0774, 0.1759, -0.0496,
0.0954, -0.0499, 0.0824,
0.1059, 0.0173, -0.0586,
-0.0666, -0.0287, -0.0652,
-0.0558, -0.1362, 0.0015,
0.1277, 0.1020, -0.1369,
0.0020, -0.0103, -0.0804,
0.0507, 0.1404, -0.0241,
0.0520, 0.1239, 0.0633,
-0.0268, 0.0335, 0.0883,
-0.0549, -0.1022, -0.0515,
-0.0163, -0.1167, -0.0442,
0.0858, -0.0804, -0.0014,
0.0354, -0.0666, -0.2105,
-0.0950, 0.1578, -0.0920,
-0.1303, 0.0299, -0.0195,
-0.0281, -0.1993, -0.0154,
0.0796, 0.0503, 0.0954,
0.0540, 0.0212, 0.0389,
-0.1387, 0.1091, -0.1212,
0.1556, 0.3573, 0.0976,
-0.0587, -0.2070, 0.2067,
0.0138, 0.0051, -0.1008,
0.2877, 0.1079, -0.0681,
0.0953, -0.0739, -0.2349,
0.1482, 0.0657, 0.0480,
0.1590, -0.0009, 0.1402,
0.0700, 0.0435, 0.1190,
0.0957, 0.0117, -0.1010,
0.1790, -0.0200, -0.0765,
0.0797, 0.1455, -0.0340,
0.0008, -0.0267, 0.0089,
0.0644, 0.0647, 0.0397,
0.0463, -0.0116, -0.0771,
0.2237, 0.0324, 0.0192,
-0.0082, -0.0345, 0.0294,
0.0719, -0.0185, 0.1008,
-0.0307, 0.0134, -0.0747,
0.0776, -0.1485, 0.0135,
0.0965, -0.0665, -0.1263,
-0.0101, -0.0097, -0.0144,
-0.0022, -0.0083, 0.0277,
0.0136, -0.0076, 0.0314,
-0.0008, 0.0722, -0.0704,
0.0053, 0.0767, 0.0368,
-0.0189, -0.1354, 0.0231,
-0.1416, 0.1945, -0.1756,
0.2058, 0.0401, -0.1348,
-0.0945, -0.2530, -0.3082,
-0.0096, 0.0871, 0.0699,
-0.0092, 0.0423, 0.0995,
-0.0914, -0.0570, -0.0718,
-0.0739, -0.2749, -0.2320,
0.1488, -0.2698, -0.1977,
0.1445, -0.1655, -0.0758,
0.2035, -0.0138, 0.0332,
0.0282, -0.2247, -0.0945,
-0.0614, -0.2484, -0.0595,
-0.1174, -0.1252, 0.1969,
-0.1101, -0.2950, -0.2164,
-0.0348, -0.0891, 0.1250,
0.0195, 0.0050, 0.0300,
-0.0508, -0.0316, -0.0194,
0.0199, 0.0345, 0.0444,
-0.0022, -0.0529, 0.1604,
0.0756, -0.2015, -0.2117,
-0.0837, -0.1270, 0.1330,
0.0286, 0.0952, 0.1082,
0.0724, -0.0446, -0.1156,
0.0545, 0.0444, -0.0291,
0.0759, 0.1110, 0.0944,
0.1615, 0.4302, -0.1060,
0.0418, -0.0281, -0.1378,
-0.0757, -0.0527, -0.1578,
0.0123, -0.0427, 0.1504,
0.0694, 0.0690, 0.0203,
0.2132, -0.3449, 0.0936,
0.2491, 0.0279, -0.0884,
-0.0447, 0.1589, -0.0054,
-0.0246, 0.1247, 0.0403,
0.0513, -0.0541, -0.1141,
0.0712, -0.1174, -0.0051,
0.2304, 0.2431, -0.0517,
-0.1548, -0.0401, 0.2032,
-0.0087, -0.1676, -0.0600,
0.1094, -0.0329, 0.0530,
-0.0580, 0.1499, -0.0806,
-0.0086, -0.1400, -0.0636,
0.0708, -0.1003, -0.1113,
-0.0732, -0.1199, 0.0060,
-0.0534, -0.0011, 0.0965,
-0.0268, 0.0116, -0.1161,
0.0787, 0.3925, -0.0819,
-0.0041, -0.0892, -0.2063,
-0.1296, 0.0924, -0.0079,
0.5625, 0.4013, 0.1645,
-0.0137, -0.1935, 0.2714,
0.0980, 0.0016, -0.1461,
0.1576, 0.0305, -0.1450,
0.1503, -0.0303, -0.1403,
0.0262, -0.0077, 0.0459,
0.2718, 0.0754, 0.2404,
0.1381, -0.1499, 0.0016,
0.1454, -0.1278, -0.0085,
0.1674, -0.0834, 0.1993,
0.0874, -0.0598, -0.0188,
0.2003, 0.3296, 0.0153,
-0.0154, 0.5550, -0.0945,
0.0489, 0.0415, -0.0940,
0.0164, 0.0791, 0.1077,
-0.0893, 0.1231, 0.0473,
-0.0319, 0.1444, 0.1690,
-0.0518, -0.1404, -0.1778,
-0.0170, 0.1395, -0.0234,
0.0128, -0.0112, -0.0472,
0.1039, 0.1982, -0.0272,
0.0282, -0.1199, -0.2622,
-0.0449, 0.0239, -0.1030,
-0.0840, -0.1044, -0.0646,
0.0588, 0.1937, -0.2494,
0.0180, 0.0747, 0.1530,
0.0500, 0.1756, 0.0491,
-0.1113, -0.0079, 0.0854,
-0.1493, -0.0559, -0.0373,
0.1972, -0.3158, -0.0500,
0.1932, 0.3177, -0.0018,
-0.0516, -0.1144, 0.0686,
0.0175, 0.0598, 0.0345,
-0.0667, -0.1078, 0.0384,
0.0897, 0.2198, -0.0531,
-0.2596, -0.1997, 0.0195,
0.0332, 0.4098, 0.1381,
0.1985, -0.0669, -0.1275,
-0.0751, -0.2388, -0.0672,
0.0090, 0.0891, -0.0362,
0.1392, -0.0518, 0.2039,
0.2079, -0.1202, 0.0707,
0.0498, -0.1237, -0.0665,
-0.0398, -0.1557, -0.0928,
0.0505, 0.1220, 0.0352,
-0.0674, -0.1159, 0.0724,
-0.0331, -0.1751, 0.0766,
0.0992, -0.0763, 0.0090,
-0.1223, 0.2621, -0.2029,
0.0509, -0.0279, -0.1061,
0.0598, 0.0353, -0.1610,
0.0165, 0.0835, 0.0704,
-0.0079, -0.0982, 0.0187,
0.2331, -0.1929, 0.0684,
-0.0507, 0.1476, -0.0886,
-0.0275, 0.1658, 0.0697,
-0.1123, -0.0069, -0.0851,
-0.0377, -0.0917, -0.0629,
-0.0420, 0.0506, 0.1111,
0.1086, 0.1351, -0.0851,
0.0466, 0.2750, 0.0185,
-0.0208, 0.2090, 0.0271,
0.0217, -0.0548, 0.0078,
-0.0609, 0.1029, -0.1641,
0.1392, 0.0115, 0.0317,
-0.0570, 0.1060, 0.1814,
-0.2015, -0.1301, 0.1082,
0.2452, -0.1815, -0.0046,
0.0103, -0.0466, -0.0895,
0.0158, -0.0594, -0.1386,
-0.0073, -0.0719, -0.0716,
0.1308, -0.0206, 0.0511,
-0.0437, -0.0763, 0.0287,
0.0493, -0.1239, 0.0219,
-0.0041, 0.0373, 0.0262,
0.0078, -0.0249, -0.0284,
0.0598, -0.0205, -0.0276,
0.0115, -0.1778, -0.0395,
0.1673, -0.0036, 0.2334,
0.0706, -0.0694, 0.0177,
0.1123, -0.0043, 0.0716,
-0.0894, -0.1609, 0.0334,
-0.0046, -0.2006, -0.0977,
-0.0127, 0.1198, -0.0339,
-0.0283, 0.1354, 0.1637,
-0.1696, 0.0187, -0.2621,
0.0496, 0.2834, 0.0423,
0.1126, 0.3962, 0.1660,
-0.0750, 0.1955, 0.0590,
-0.1088, -0.1146, -0.1219,
0.1360, 0.1524, 0.0498,
-0.1151, 0.0219, -0.0063,
-0.0821, 0.0247, -0.1065,
0.1153, 0.2085, 0.0618,
-0.0383, 0.0527, -0.2067
}
,
{
1.8014e-01, 2.1908e-01, -2.1088e-03,
1.7345e-01, 2.7654e-01, 1.3607e-02,
1.1363e-01, 9.9105e-02, -6.5730e-02,
-3.5679e-02, 9.6072e-03, 4.0721e-02,
-1.8771e-02, -2.3484e-04, -1.0230e-02,
1.6965e-02, -1.3032e-02, -6.3906e-02,
-4.5686e-02, -3.6733e-02, -4.8873e-02,
4.0752e-02, 2.1615e-02, -1.4822e-02,
1.1689e-01, 3.0153e-02, -5.0163e-04,
-7.0394e-03, -1.2387e-01, -8.9243e-02,
-1.8312e-01, -1.3868e-01, -6.2618e-02,
-8.1627e-02, -2.0480e-01, -3.0740e-01,
4.4296e-02, 3.8572e-02, 4.3754e-02,
1.7538e-01, 5.3284e-02, -7.5663e-03,
1.9670e-01, -1.2397e-01, -1.6266e-01,
1.4575e-01, -5.7771e-02, 2.7619e-02,
2.2757e-02, -4.8910e-01, -2.6201e-01,
3.6513e-02, -2.0704e-01, -1.3225e-01,
-6.7533e-02, 1.1289e-02, 7.1316e-02,
-7.6847e-02, 6.8128e-02, 7.4717e-02,
1.1269e-01, 2.9978e-02, 3.2132e-02,
-5.4557e-02, -4.4599e-02, 4.1835e-02,
5.7964e-02, -2.1246e-03, 1.5007e-01,
1.8432e-01, 1.1463e-01, 2.2691e-01,
9.6166e-02, 4.7887e-02, -3.8399e-02,
5.8153e-02, -2.0255e-02, -1.1362e-01,
2.6402e-02, 2.5562e-02, 1.9096e-02,
1.1588e-01, 1.4540e-01, 1.1948e-01,
1.0360e-01, 5.9083e-02, 1.9263e-01,
1.6953e-01, 2.7390e-02, 9.7883e-02,
1.5059e-01, 6.7593e-02, -4.5843e-03,
8.7031e-02, -2.0926e-03, -6.3056e-02,
-6.6960e-02, -5.2056e-02, -7.3570e-02,
1.4361e-02, 1.1059e-01, -4.9720e-02,
4.4270e-02, 3.9995e-02, 4.3101e-03,
-1.1042e-01, 4.5028e-02, -8.9124e-02,
-1.2906e-01, -7.6972e-02, -6.5449e-03,
-1.9269e-01, 2.8349e-01, 1.1573e-01,
-1.7983e-01, 9.7615e-02, 9.4003e-03,
-4.7802e-02, -1.5889e-01, -1.2693e-01,
7.4717e-02, 2.8655e-01, -7.2637e-02,
1.5837e-02, 8.7125e-02, -1.2198e-01,
-1.7754e-02, -5.6443e-02, -9.8661e-03,
6.3040e-02, 2.0249e-02, -3.5368e-02,
9.7756e-03, 2.6760e-02, -5.5172e-02,
-1.0406e-02, 4.8313e-02, 2.4717e-02,
-5.2851e-02, 6.8496e-02, -2.5933e-02,
4.5932e-02, 5.9892e-02, 1.9200e-02,
-5.1316e-40, -5.1811e-40, -1.5144e-40,
-6.7758e-38, -5.4608e-40, -3.9680e-40,
-1.9155e-39, 2.0423e-41, 1.5256e-41,
-2.5559e-08, -3.2461e-08, -2.6821e-08,
-3.6885e-08, -4.6896e-08, -3.9086e-08,
-3.4305e-08, -4.4160e-08, -3.7187e-08,
-3.7416e-40, 3.6550e-40, 5.0727e-40,
-1.6722e-40, 3.9228e-40, 5.4548e-40,
-5.7512e-40, -2.8156e-40, 9.4571e-41,
-4.7040e-40, -1.6974e-40, 6.3849e-40,
-3.7322e-40, 2.6014e-40, 2.3080e-40,
-2.8395e-40, -3.7116e-40, 4.4393e-40,
1.1597e-40, 4.3291e-40, 3.8219e-40,
3.3393e-40, 3.1747e-40, -1.8400e-36,
-5.5215e-40, 1.7648e-40, -1.6540e-35,
-3.0953e-40, 5.3063e-40, -1.6454e-40,
2.1341e-40, 2.0790e-40, -3.0226e-40,
-2.6807e-40, -1.6601e-40, 5.1829e-40,
-1.8897e-40, -4.5956e-41, 5.3784e-40,
-2.5661e-40, -2.1726e-40, 1.2010e-40,
1.8263e-41, 1.1214e-40, -3.7693e-40,
-4.2596e-40, 1.8854e-40, 5.5010e-40,
-6.6262e-40, -4.8808e-40, 3.3123e-40,
5.9379e-41, 2.3249e-40, 4.4504e-40,
-8.4836e-04, -8.4397e-04, -5.8640e-04,
-8.3506e-04, -8.0192e-04, -5.3901e-04,
-8.3539e-04, -7.8069e-04, -4.8720e-04,
-3.4706e-04, -4.4640e-04, -5.2353e-04,
-4.4518e-04, -5.3374e-04, -5.2734e-04,
-5.8780e-04, -5.8730e-04, -5.4362e-04,
-5.2452e-04, -5.4578e-04, -5.6266e-04,
-4.2387e-04, -4.4643e-04, -4.8936e-04,
-3.5880e-04, -3.7886e-04, -4.1998e-04,
-2.4479e-04, -4.0736e-04, -3.1189e-04,
-3.4922e-04, -4.0173e-04, -2.5042e-04,
-5.7091e-04, -5.2665e-04, -2.3293e-04,
-2.8505e-04, 9.7283e-05, 3.1209e-04,
-2.7463e-04, 1.8704e-04, 4.4351e-04,
-9.1436e-05, 3.2602e-04, 5.7573e-04,
-4.0112e-04, -4.2566e-04, -2.4300e-04,
-9.9362e-05, -6.5499e-05, 3.2872e-05,
1.1584e-04, 2.3417e-04, 3.4427e-04,
-7.5767e-05, 3.9768e-06, 6.2201e-05,
2.3151e-05, 2.5595e-04, 3.4038e-04,
-1.3871e-05, 3.0295e-04, 4.4170e-04,
-1.7802e-04, -4.5376e-04, -5.1847e-04,
-5.0687e-04, -5.5837e-04, -2.5917e-04,
-5.3992e-04, -7.1375e-04, -4.8728e-04,
-1.7543e-01, -3.4151e-01, -3.2619e-02,
-1.9701e-02, -1.5494e-01, -1.6534e-01,
3.5632e-02, -1.0897e-01, -3.8379e-02,
-6.1420e-02, -1.0735e-01, 1.4730e-01,
7.4386e-02, -1.0487e-01, 7.9646e-02,
1.7130e-02, 4.4391e-02, -5.1959e-03,
4.5682e-02, -1.1543e-01, 9.4035e-03,
-3.4376e-01, -1.1961e-01, 1.0099e-01,
1.1335e-01, 7.5840e-02, 1.0675e-01,
4.9539e-02, 8.7406e-02, 4.4951e-02,
1.8111e-01, 2.6406e-01, -1.5924e-02,
-1.1464e-01, 8.4579e-04, -6.6811e-02,
-8.9635e-03, 1.8236e-03, 3.6561e-02,
-7.0281e-02, 2.9717e-01, 3.1836e-02,
-1.3647e-01, -6.5627e-02, 9.3063e-02,
-2.1851e-01, -6.0226e-02, -1.0326e-01,
5.3441e-02, 1.9103e-01, -5.7999e-02,
-3.3512e-02, 1.5496e-01, -1.1111e-01,
2.3256e-03, -1.5004e-01, -9.1248e-02,
-9.7706e-02, 1.9549e-01, -1.5403e-01,
-1.5327e-01, 8.3335e-02, 5.6111e-03,
-1.5707e-01, 8.0277e-03, -7.3955e-02,
-1.4111e-01, -1.3548e-01, -1.0563e-01,
2.3054e-01, -2.1822e-02, -6.6938e-03,
-1.0259e-01, 4.3577e-02, -1.7630e-01,
1.6484e-01, 4.2413e-01, 6.9475e-02,
-2.4705e-01, 2.5757e-01, -9.5611e-02,
1.0236e-01, -3.4820e-02, -6.8818e-03,
-1.1434e-01, -3.1800e-01, 2.1337e-02,
-1.9939e-01, -2.6532e-01, 7.3361e-02,
6.5939e-02, 9.5812e-02, -7.0156e-02,
-1.6249e-02, -1.5927e-02, -1.1189e-01,
-9.3936e-03, -1.0933e-01, -2.9399e-02,
-2.8752e-02, -4.5613e-02, -1.2718e-02,
3.8781e-01, 2.6776e-01, -1.0373e-02,
-2.3927e-02, -6.4398e-02, 9.9117e-02,
-6.0732e-02, -5.5917e-03, 5.1716e-02,
-1.4168e-01, 1.7661e-01, -5.5893e-02,
-3.0419e-01, -3.5537e-01, 2.1978e-01,
-1.8610e-01, -5.7743e-03, 3.2649e-02,
1.9975e-01, 1.6508e-01, 1.3808e-02,
1.0733e-01, 1.4722e-01, 5.8671e-02,
6.4940e-02, 1.6114e-01, 3.9697e-02,
1.1530e-01, 2.4021e-01, -2.1669e-01,
6.0220e-02, 2.0257e-01, -1.5227e-01,
-6.1096e-02, 6.6511e-02, -1.3858e-01,
-6.5275e-02, 1.0891e-01, 8.2048e-02,
-6.7907e-02, 2.2863e-02, -1.0322e-01,
1.6542e-01, -1.4436e-01, 6.4125e-02,
-1.0378e-01, -3.2346e-01, -1.5123e-02,
3.8758e-03, 1.1006e-01, -4.4325e-02,
-1.0102e-01, -3.7699e-02, 9.2472e-02,
-6.8972e-02, -1.2308e-02, 1.6478e-01,
3.4351e-02, -1.7461e-02, 1.0301e-01,
-2.7125e-01, -5.6730e-02, -2.5989e-01,
-3.0163e-01, -1.4826e-01, -3.4955e-01,
-1.6259e-01, -1.6708e-01, -2.7964e-01,
-6.7134e-02, -2.2385e-01, 2.1776e-01,
-1.1351e-02, -3.7861e-01, 1.8687e-01,
4.0551e-02, 8.1943e-02, 1.0866e-01,
1.0273e-01, 1.1844e-01, -1.1852e-01,
2.6758e-02, -8.5806e-02, 5.9444e-02,
-5.1627e-02, 7.1636e-02, 2.2841e-01,
-3.7242e-03, 2.9723e-01, 1.1918e-01,
8.4994e-02, -3.5747e-01, 3.6148e-02,
9.9705e-02, -1.3736e-01, -6.0080e-02,
1.2370e-01, 5.0668e-02, -6.0246e-02,
6.0562e-02, -3.5068e-01, -3.2645e-01,
9.1020e-04, 6.6203e-02, -1.0770e-01,
1.9434e-02, 3.0018e-01, 2.8018e-01,
1.4021e-01, 2.7481e-01, 2.2868e-01,
4.8540e-02, 1.7719e-01, -4.5834e-02,
-9.6349e-02, -2.3008e-02, -1.4497e-01,
4.3053e-02, -1.0161e-01, 2.8750e-02,
-1.2594e-01, -1.0388e-02, -4.3966e-02,
7.5993e-02, -7.1609e-02, 1.4624e-02,
4.1110e-02, 7.1258e-02, -2.9109e-02,
-5.8698e-03, 1.2389e-01, 4.7648e-02,
-6.1585e-04, -4.4556e-02, -2.3373e-02,
-4.4883e-02, -7.7722e-02, -7.3635e-02,
-2.7750e-02, -1.5117e-03, -8.7368e-02,
2.5113e-02, 7.7490e-02, 2.9024e-02,
1.5426e-01, 2.5472e-01, 4.8057e-02,
-1.1969e-01, -1.1487e-01, -1.1802e-01,
-4.7392e-02, -4.2226e-02, 3.1968e-02,
-2.6717e-01, -5.0206e-02, 8.1946e-04,
-4.0426e-02, 1.4373e-01, -3.3121e-03,
-4.5292e-02, -2.4538e-02, 1.0377e-01,
-1.7780e-02, 2.0058e-01, -2.4343e-02,
-1.1714e-02, 1.5984e-01, -1.2638e-01,
6.4655e-02, 3.7703e-02, 3.7970e-02,
9.1864e-03, 1.1468e-01, -6.2760e-04,
-1.4812e-01, 6.5670e-03, 1.0765e-01,
1.5023e-01, -7.0594e-02, -1.3924e-01,
3.6016e-02, -3.9078e-02, -3.8950e-02,
1.8735e-02, -1.5573e-01, -1.2456e-01
}
,
{
4.8634e-02, -1.3617e-01, 6.1231e-02,
-7.0235e-02, -6.4110e-01, 1.5985e-01,
8.6151e-02, 1.1847e-01, 1.3819e-01,
-3.6017e-04, -3.2273e-02, -8.5485e-02,
-7.0804e-03, 2.1751e-01, 7.2575e-03,
-8.3606e-02, -1.4885e-01, -1.2702e-01,
4.0848e-41, 8.0934e-40, -1.8889e-40,
-3.9103e-40, -7.4709e-40, 3.8377e-40,
-2.4159e-40, -4.7610e-40, 7.7359e-40,
-8.6217e-05, -5.9763e-05, -4.0558e-05,
-7.4966e-05, -4.7074e-05, -3.1656e-05,
-9.8390e-05, -6.6833e-05, -4.7669e-05,
3.5375e-02, 2.8660e-02, 4.1277e-02,
1.6289e-01, -3.2199e-01, -1.7845e-02,
2.4659e-01, -3.9618e-02, 4.1065e-03,
2.7267e-02, 8.6819e-02, 9.5070e-02,
-7.2700e-02, -2.8826e-01, 1.1750e-03,
2.5259e-02, 2.4681e-03, 6.4737e-02,
7.3023e-03, 2.9631e-02, 1.0820e-02,
-2.1400e-02, 5.4244e-01, 1.5639e-01,
-1.7561e-01, 4.8947e-01, -8.8305e-02,
6.5073e-02, 3.4922e-01, 1.3483e-01,
1.4506e-01, -2.5472e-01, -7.2894e-02,
4.5945e-02, 1.4040e-01, 1.2148e-01,
-2.6932e-01, -1.1518e-01, -9.3158e-03,
-2.3961e-01, -1.2479e-01, -8.9796e-02,
1.8688e-02, -4.9267e-02, 7.7189e-02,
-7.3691e-02, 7.8186e-03, 1.3761e-02,
-1.5689e-01, 3.1138e-02, 3.9231e-02,
-4.3607e-03, 2.0813e-01, 5.5635e-02,
-6.7000e-41, 9.8995e-41, 3.0043e-40,
6.7190e-40, 4.0827e-40, 7.6057e-40,
4.2208e-40, 8.1141e-40, -3.3569e-40,
1.0179e-03, 5.1543e-04, 3.8076e-04,
7.3507e-04, 4.5432e-04, 3.7410e-04,
9.3014e-04, 6.7365e-04, 6.0051e-04,
-5.1998e-02, 6.5768e-02, 3.1603e-02,
-3.0198e-02, -3.1692e-02, -6.9299e-02,
1.7672e-02, 2.3766e-01, 5.7877e-02,
-5.7944e-02, 1.2624e-01, -1.4396e-01,
-4.1542e-02, 6.5110e-01, 1.0942e-01,
-1.3133e-01, 5.0538e-02, -2.7371e-02,
-3.7515e-02, 2.8703e-02, 1.2382e-03,
3.8542e-01, -2.2754e-02, 3.4459e-02,
3.0545e-01, -5.3817e-01, -2.1389e-03,
1.3888e-02, -2.2775e-01, -6.3692e-02,
-1.8430e-01, 5.8452e-02, 4.5764e-02,
-8.5045e-02, -1.7060e-01, -1.8565e-02,
-2.0384e-02, -3.3018e-02, -5.1135e-02,
-4.5789e-02, -1.8105e-01, 3.5419e-02,
-5.0081e-02, 8.7719e-02, 1.0373e-01,
-1.0033e-02, 7.0530e-02, -7.8012e-03,
8.4042e-02, 1.1982e-01, -9.6046e-02,
-6.4009e-02, -1.0711e-01, -1.3523e-01,
1.8868e-41, -7.0039e-40, -7.2568e-40,
1.7408e-40, -7.8143e-40, -6.8130e-40,
-6.3142e-40, -6.2560e-40, -7.4238e-40,
2.6297e-04, 7.0014e-05, -4.0981e-04,
2.6263e-04, 4.2811e-05, -4.9950e-04,
3.9795e-04, 1.2615e-04, -4.7660e-04,
7.5933e-02, 2.6295e-02, 2.7984e-02,
-5.5914e-03, -8.7981e-02, -9.2618e-02,
4.2725e-02, -3.1210e-01, 1.3412e-01,
5.2683e-02, 3.9891e-01, 2.9150e-02,
-6.6090e-02, 2.9455e-01, -1.9710e-01,
1.4546e-02, -2.5572e-02, 8.1125e-02,
1.2271e-01, 1.6097e-01, 4.5644e-02,
3.6101e-02, -1.7174e-02, 6.6110e-02,
1.5078e-01, 4.5180e-01, 7.7154e-02,
-5.9725e-02, 1.0185e-01, 1.1363e-03,
6.7791e-02, 1.7696e-02, 5.2638e-02,
3.3051e-02, -8.4049e-02, 1.4380e-01,
1.8744e-02, -2.0940e-01, -2.1424e-01,
-2.1329e-01, -1.3154e-01, -3.2572e-01,
1.1292e-01, 1.2361e-02, -1.5506e-01,
-1.0362e-02, 1.9955e-02, 4.2639e-02,
-2.1952e-02, -2.4682e-02, -2.4453e-02,
-2.5606e-02, -3.3580e-02, -3.6340e-02,
-5.0830e-40, 6.3797e-40, -5.2775e-40,
-7.7988e-40, -7.4579e-40, -5.1901e-40,
-3.8275e-41, -5.7607e-40, -1.3656e-40,
2.7164e-04, 5.9977e-04, 8.6886e-04,
3.0116e-04, 7.0106e-04, 1.0248e-03,
2.9177e-04, 6.4748e-04, 9.4825e-04,
6.6310e-02, 1.5240e-02, -5.3044e-02,
1.2545e-01, 5.0582e-02, 2.7358e-02,
1.9338e-01, 1.1377e-01, 4.6110e-02,
-3.1997e-02, 1.5171e-02, -4.9372e-02,
5.4615e-04, 1.7262e-01, -2.2081e-01,
8.4871e-02, 1.7824e-02, -3.6429e-02,
4.2821e-02, -1.0055e-01, 4.8927e-02,
1.2524e-01, 5.8859e-02, -2.0980e-02,
2.2897e-01, 1.7594e-01, 3.4239e-02,
1.0915e-01, 1.2088e-01, 1.0151e-01,
6.8449e-03, -1.5546e-01, 1.2024e-01,
4.9036e-02, -1.2245e-01, 4.6713e-02,
7.5083e-03, -4.8084e-02, 9.7731e-03,
4.8779e-02, 3.1848e-02, -9.3517e-02,
6.4595e-02, 3.9337e-02, -7.2343e-02,
3.9519e-02, 4.1867e-02, -5.0485e-02,
2.5257e-02, 1.4071e-01, 1.3606e-01,
1.7481e-01, 2.0210e-01, 1.7241e-01,
-7.6295e-40, -7.8460e-40, -4.1806e-41,
-7.9994e-40, -7.3271e-40, -6.2665e-40,
-7.9602e-40, -7.0226e-40, -7.4131e-40,
-4.5544e-04, -5.2379e-04, -7.0755e-04,
-3.3807e-04, -3.8123e-04, -5.3222e-04,
-3.1771e-04, -3.4586e-04, -4.8784e-04,
-3.5257e-02, -1.1866e-02, 1.9717e-02,
-6.0777e-02, -7.3127e-03, -3.2825e-02,
-1.4952e-01, 3.2117e-01, -6.3786e-02,
-1.0255e-02, 1.2961e-01, -8.6823e-02,
1.6994e-01, 4.7491e-01, 2.7135e-01,
2.8538e-03, 1.5572e-01, -3.3736e-02,
8.5996e-02, -1.0176e-02, 2.6629e-02,
7.3362e-02, -7.7525e-03, 5.6261e-02,
1.0819e-01, -2.5863e-01, -5.7146e-03,
-7.1781e-02, 2.8376e-03, 7.8298e-02,
1.3183e-01, 2.7149e-02, -9.9786e-02,
9.0491e-02, 8.7938e-02, -2.1882e-02,
4.1396e-03, -4.5816e-02, -7.8892e-02,
-6.3855e-03, 1.7502e-01, 1.2053e-01,
1.2492e-01, 6.1258e-02, -4.0516e-02,
-4.5409e-02, -4.5877e-02, -7.6414e-02,
-1.0573e-02, -1.2517e-01, -4.3991e-02,
-2.6447e-02, -9.5478e-02, -2.4735e-02,
-4.6548e-41, -1.6443e-40, -3.1221e-40,
-3.2675e-40, -2.7265e-40, -3.1190e-40,
-2.2065e-40, -2.5407e-40, -6.9511e-40,
-1.2727e-04, -2.6585e-04, -3.5516e-04,
3.4272e-05, -1.6810e-04, -3.1677e-04,
-5.5355e-05, -2.9924e-04, -4.3692e-04,
-5.6428e-02, 1.0771e-01, 1.0185e-01,
2.2948e-01, -7.8744e-02, 6.0768e-04,
-2.2355e-03, -2.0128e-03, -5.7317e-03,
-7.1232e-03, 1.0297e-01, 1.6872e-01,
1.9194e-01, -1.1578e-01, 1.0732e-01,
-8.6952e-02, 3.2901e-02, -6.6658e-03,
7.3979e-02, 8.3875e-02, -7.6372e-03,
1.9577e-01, 2.7391e-01, 4.5275e-02,
1.5610e-01, 2.3802e-01, 1.6555e-02,
1.3814e-01, 1.2870e-01, 9.1626e-02,
-4.6890e-02, -8.8734e-02, 7.8866e-02,
1.0027e-01, 2.2139e-01, 1.0050e-01,
-6.5845e-02, -1.0990e-01, -6.9896e-02,
4.1687e-02, 3.0631e-02, -8.8441e-02,
-1.1868e-01, 1.0836e-02, 2.5873e-02,
-1.7114e-02, 7.6295e-02, 1.5439e-02,
-2.4271e-02, 5.8538e-02, 9.8190e-02,
4.9742e-02, 8.7807e-02, 6.5871e-02,
-7.2669e-40, -7.5936e-41, -7.4975e-40,
-1.6984e-42, -1.7334e-40, -8.4954e-41,
-2.1556e-41, -1.5374e-40, -1.5515e-40,
-6.2626e-04, -7.2727e-04, -8.1665e-04,
-5.6584e-04, -6.1190e-04, -6.9584e-04,
-5.6278e-04, -5.8554e-04, -6.3554e-04,
8.1550e-02, -4.1817e-03, 1.2301e-02,
-4.5800e-02, 4.6708e-02, -8.7972e-02,
-2.9880e-01, 2.6456e-01, 3.9363e-03,
-3.0939e-02, -1.9921e-01, -3.8689e-03,
-8.6803e-02, 3.4857e-01, -1.0201e-01,
2.1597e-02, 1.4380e-02, 4.3448e-02,
7.1195e-02, 1.4980e-01, 3.8079e-02,
-1.2678e-01, -8.1274e-02, -4.3445e-02,
5.2482e-02, -1.8763e-01, 1.1557e-01,
-9.4614e-02, 5.4415e-02, -3.1485e-02,
-3.6451e-02, 1.4379e-01, 5.2291e-02,
-9.2069e-02, 9.5675e-02, -5.8433e-02,
7.5768e-03, -7.1280e-02, -1.4576e-01,
-1.4671e-01, -1.2446e-01, -1.5207e-01,
-5.4368e-02, 3.8303e-02, -8.1794e-02,
2.0492e-02, 4.0910e-02, 1.1379e-02,
3.1582e-02, 3.6039e-02, -4.4040e-03,
1.7540e-02, 1.4097e-04, -6.4367e-02,
-7.9553e-40, -5.3941e-40, -7.1912e-40,
-5.8099e-40, -6.8315e-40, -6.6012e-40,
-7.6242e-40, -5.4784e-40, -7.0267e-40,
-2.9197e-04, -2.1994e-04, -1.9501e-04,
-2.6516e-05, -1.2642e-05, -8.4345e-05,
1.6763e-04, 1.1268e-04, -5.4516e-05,
-3.8007e-03, -6.8765e-02, -9.5716e-02,
6.3091e-02, -8.1971e-02, -9.2895e-02,
-6.8353e-03, 7.3639e-02, 1.3505e-01,
9.0083e-02, 2.4352e-01, 3.9708e-02,
-5.4051e-02, -6.8748e-02, -1.8937e-01,
-1.9808e-03, -7.1337e-02, -2.8316e-02,
8.1504e-02, 8.3226e-03, 6.9013e-03,
9.4393e-02, 5.9322e-02, 5.5023e-02,
1.0236e-01, -4.0205e-02, 3.5172e-02,
6.5381e-02, 4.9075e-02, -5.3931e-02,
4.3961e-02, 9.0223e-03, -4.1678e-02,
-6.4262e-02, -5.0304e-02, -9.3597e-02
}
,
{
3.8496e-01, 1.4287e-01, 3.4530e-02,
-5.5398e-01, -6.0381e-02, 1.2078e-02,
7.9983e-02, 2.1478e-01, -5.7915e-02,
-1.4020e-01, -2.6914e-02, 1.5915e-02,
1.2371e-01, 2.5496e-01, -2.9867e-02,
1.3269e-02, -9.9596e-02, -2.3173e-01,
5.1471e-02, -4.5507e-01, -7.7620e-02,
-5.1328e-02, -1.9808e-02, -4.7051e-02,
3.0573e-02, 7.8762e-02, -7.2627e-02,
6.8690e-02, -4.0125e-02, 5.6657e-02,
8.0208e-02, -2.0075e-02, 1.4019e-01,
-5.7959e-02, -7.3152e-02, 2.0202e-02,
-8.8702e-02, -1.9911e-01, -1.5570e-01,
2.8401e-02, 5.8802e-02, 1.3050e-01,
2.1905e-02, -3.4298e-02, 4.0447e-02,
1.0184e-01, -9.0101e-02, -9.2770e-02,
1.1713e-02, -3.2514e-01, 1.9393e-01,
-9.4227e-02, 2.7053e-01, -9.7233e-02,
-1.0478e-01, 6.0652e-02, 8.3399e-02,
1.1104e-01, 2.9008e-01, 4.9208e-02,
-1.5414e-02, 3.1718e-02, -7.9083e-02,
-5.2358e-03, 9.0101e-02, 5.2973e-02,
5.5527e-02, -1.6599e-02, -8.5167e-02,
-5.1018e-02, 7.2243e-03, -9.5684e-02,
-5.0608e-02, -6.7864e-02, -8.9496e-02,
-2.4348e-01, 2.7477e-01, -1.7588e-01,
1.3927e-01, 5.5502e-02, -1.3370e-02,
-4.3509e-02, -2.1511e-01, -5.9070e-02,
1.0293e-01, 4.2678e-01, -8.7527e-02,
-6.8546e-02, -5.6296e-02, -8.7962e-02,
-8.6130e-02, 9.2069e-02, 7.2303e-02,
2.4365e-02, 2.1988e-01, -7.9408e-03,
-3.0063e-02, 1.1554e-01, -5.0311e-02,
1.0605e-02, 5.4598e-02, 1.3826e-02,
-1.4342e-02, 1.5353e-01, -5.3974e-03,
1.5583e-01, -6.0889e-02, -1.5772e-02,
-2.5956e-02, -3.5285e-01, -2.0338e-01,
2.6011e-01, 2.2737e-01, -1.4693e-01,
-7.7964e-02, 1.0053e-01, -5.4278e-02,
-3.0668e-02, 3.4556e-02, -3.4321e-02,
7.8695e-02, -2.2357e-01, 9.5733e-02,
1.7483e-01, -1.5153e-01, -1.8262e-03,
4.7605e-02, -2.2834e-01, 4.6383e-02,
1.5701e-01, 3.2264e-01, 1.0334e-02,
6.3351e-02, 1.1340e-01, 8.3478e-02,
6.4196e-02, 3.3460e-02, 8.8473e-02,
5.4663e-02, -1.7665e-03, -4.1935e-02,
-6.1346e-03, -5.4463e-02, -6.2960e-02,
2.8159e-02, 2.9903e-02, 9.2429e-03,
-3.0041e-02, -9.7783e-02, -4.9500e-02,
9.5350e-02, -7.9143e-02, -1.3244e-01,
-6.5129e-02, 1.4568e-01, 6.6843e-02,
1.5241e-01, -7.8736e-02, 1.0721e-01,
-5.9015e-02, 1.5320e-01, 3.0796e-01,
-5.4266e-03, -6.0804e-02, 3.7326e-02,
7.4844e-02, 4.8340e-02, 1.5251e-01,
3.8158e-02, 1.2087e-01, -8.9003e-02,
-5.8369e-02, -7.3813e-02, 1.2240e-02,
-4.5106e-03, 7.4580e-02, 1.2042e-01,
4.1959e-02, 1.4529e-01, 5.3636e-03,
-4.9708e-03, -1.0775e-02, -5.9374e-02,
1.5358e-02, 1.7277e-02, -1.5412e-01,
8.1647e-02, 3.3503e-02, -8.1934e-02,
-1.5807e-02, -1.0001e-02, -1.0059e-02,
-9.0493e-03, -7.8954e-02, 4.3891e-02,
-9.3815e-03, 3.2241e-02, 4.7962e-02,
-7.2252e-03, 7.9324e-02, 2.0662e-02,
-5.7710e-02, -5.1142e-02, -1.4296e-01,
2.1501e-02, -1.9518e-02, -2.7658e-02,
1.4983e-01, 8.5447e-02, 7.2092e-04,
1.1275e-01, 6.1131e-02, 5.7955e-02,
1.5624e-02, 2.7225e-01, 1.1716e-01,
-1.6322e-04, -1.3368e-04, -1.5575e-04,
-1.0525e-04, -1.0765e-04, -1.5306e-04,
-8.9692e-05, -1.0857e-04, -1.7316e-04,
-1.8015e-03, -1.3733e-03, -3.9154e-04,
-1.8453e-03, -1.4238e-03, -4.4163e-04,
-1.5511e-03, -1.1131e-03, -2.0087e-04,
-2.4082e-03, -2.2576e-03, -1.9231e-03,
-2.4913e-03, -2.4136e-03, -2.1678e-03,
-2.5057e-03, -2.4650e-03, -2.2732e-03,
-2.3901e-05, -1.5870e-05, -5.8255e-06,
-1.5163e-05, -1.2370e-05, -6.0712e-06,
-1.3098e-05, -1.1132e-05, -5.7866e-06,
-5.9760e-03, -5.9998e-03, -6.0295e-03,
-5.9962e-03, -6.0100e-03, -6.0277e-03,
-6.0003e-03, -6.0059e-03, -6.0148e-03,
-3.2764e-05, -2.9574e-05, -2.8001e-05,
-1.0846e-05, -1.1569e-05, -1.4282e-05,
-1.6255e-06, -2.5666e-06, -4.7808e-06,
-5.1999e-03, -5.2334e-03, -5.2847e-03,
-5.2057e-03, -5.2283e-03, -5.2713e-03,
-5.2195e-03, -5.2321e-03, -5.2633e-03,
-3.0782e-06, -9.2118e-06, -1.6177e-05,
-1.6382e-06, -6.9559e-06, -1.4245e-05,
-1.1471e-06, -6.5984e-06, -1.4903e-05,
7.7574e-02, -1.2866e-02, 4.1348e-03,
-6.7298e-02, -1.3691e-01, 6.4079e-02,
3.7962e-02, 8.7737e-02, -4.1046e-02,
-2.8471e-02, 1.7647e-01, 6.4232e-02,
1.2316e-01, 3.6800e-01, -1.5740e-01,
-6.0839e-02, 1.5449e-02, -1.0761e-01,
-6.6869e-02, -1.2867e-01, -4.0195e-02,
-4.9651e-02, -5.5500e-02, -2.5879e-02,
2.0179e-02, 6.8467e-02, 2.6575e-02,
-6.7728e-04, -7.6269e-02, 2.3470e-02,
7.1869e-02, -1.1855e-01, -2.1067e-02,
1.3263e-01, -3.2957e-02, -3.4365e-03,
8.1936e-02, 1.3073e-01, 1.1477e-01,
1.2429e-01, 1.6129e-01, 1.6251e-01,
1.5476e-02, 3.2862e-02, 2.1999e-02,
-2.9189e-02, -3.3615e-02, 5.5616e-04,
-2.4059e-02, -9.6181e-03, -4.1175e-02,
-6.3680e-04, -9.6559e-02, -9.1448e-02,
3.0238e-02, 1.2534e-01, 1.5256e-02,
-4.2118e-02, 1.5723e-01, 2.6929e-03,
1.9873e-02, 5.3050e-02, -1.0153e-03,
2.0634e-02, 9.2825e-03, -6.8027e-03,
3.1335e-03, -7.7443e-03, -1.8307e-02,
7.9974e-03, -1.0283e-03, -6.2520e-03,
4.5050e-02, 9.9504e-02, -1.3404e-01,
-6.7271e-01, -5.7290e-02, 2.6919e-02,
2.3673e-01, 2.4688e-02, -2.0227e-02,
5.1389e-02, -3.9810e-02, -8.9700e-02,
2.8445e-02, 3.9136e-01, -1.1508e-01,
-1.0449e-01, -6.2005e-02, 6.5721e-02,
-1.9123e-01, -4.2613e-02, 3.5371e-02,
1.9207e-01, 8.7916e-02, 4.8089e-02,
-5.7912e-02, 1.0014e-01, -9.4659e-02,
1.1240e-02, -6.2254e-03, 1.3399e-01,
1.6483e-01, -3.5079e-01, 1.1612e-02,
2.9215e-01, 5.6875e-02, 6.9505e-02,
1.3721e-02, 1.2607e-01, 2.6426e-02,
-2.0529e-01, 2.1768e-01, 2.1232e-01,
-6.3574e-02, 2.3504e-02, -1.0811e-01,
-1.3470e-02, -3.6446e-02, -5.4379e-02,
-1.3257e-01, -8.3412e-02, 3.7745e-02,
5.8778e-02, -2.6060e-01, 3.8262e-02,
-4.3689e-03, -6.6703e-02, -2.2025e-01,
-9.0961e-02, 1.3855e-01, 3.4573e-04,
-2.9613e-01, -3.6138e-02, -1.3827e-01,
4.5896e-02, -5.3871e-02, -1.0037e-01,
1.8457e-01, 1.0338e-01, -5.7306e-02,
5.5510e-02, -9.4938e-02, -5.6527e-05,
1.6372e-01, -3.3854e-02, 5.6332e-02,
-4.0251e-01, -5.9428e-02, -9.1470e-02,
-1.5921e-02, -5.7948e-02, 8.1682e-03,
-3.7833e-03, 1.6293e-01, 5.3784e-02,
1.1053e-01, -1.3867e-01, 2.6772e-02,
-1.3133e-02, 3.7614e-01, 3.6361e-03,
-1.4205e-01, 3.1312e-02, -9.9928e-02,
-1.5755e-01, 4.2016e-01, 9.4065e-02,
2.7536e-02, 1.2620e-01, -1.4894e-01,
-4.2137e-02, -9.8700e-02, -1.7479e-01,
4.5836e-02, 5.3893e-02, -1.0138e-01,
8.3609e-02, 2.1849e-02, -1.0648e-01,
7.4801e-02, -1.2671e-01, -1.5007e-02,
2.7440e-01, -3.1351e-01, 6.5787e-02,
-6.7820e-02, 1.6312e-01, -1.3254e-02,
-2.5770e-02, -2.0041e-02, 5.8243e-02,
1.6055e-02, 1.1971e-02, -4.6112e-02,
-1.6276e-01, -1.5313e-02, -7.9826e-03,
9.1668e-02, 9.7722e-02, 1.3754e-01,
-7.4817e-02, -4.1923e-01, -1.2337e-01,
1.3472e-01, -4.0745e-02, -5.4055e-02,
-1.2943e-02, 4.8796e-02, 4.2007e-02,
9.4668e-02, 8.6149e-02, 1.2362e-01,
7.0637e-02, 2.3565e-01, 1.4582e-01,
5.6904e-02, -8.2166e-02, 1.0563e-01,
9.3969e-02, -2.2909e-01, 4.6537e-02,
6.5257e-02, 1.4804e-01, -6.2092e-02,
-1.5699e-02, -1.5303e-02, 1.6671e-01,
-6.1947e-03, 2.5749e-01, 1.5257e-01,
3.2908e-02, -5.9907e-02, 1.1502e-01,
7.5876e-02, -2.6699e-01, -1.5891e-02,
-8.0426e-02, 1.3406e-01, -1.9881e-02,
3.5472e-02, -8.2140e-02, 1.6509e-02,
8.3390e-03, -7.8291e-02, -2.0754e-01,
3.4490e-02, 2.7913e-01, 5.9566e-02,
2.5288e-02, 1.1725e-01, -1.0356e-01,
-5.0955e-02, 9.2093e-02, -5.8477e-02,
4.4325e-02, 3.2973e-02, -1.9477e-01,
3.9582e-02, -8.6877e-02, -1.1753e-01,
3.0401e-02, -2.8757e-02, -2.5563e-02,
5.0741e-02, -3.5056e-01, -2.5584e-01,
9.1709e-02, -4.0932e-02, 2.3812e-01,
5.0945e-02, 4.9246e-02, 1.2738e-01,
5.1440e-03, 1.5703e-01, 5.5743e-02,
-3.9492e-02, 1.2114e-01, 2.0531e-02,
8.0800e-02, 2.6680e-03, -1.6660e-02,
1.0684e-01, 1.2308e-01, 1.7882e-02,
1.8280e-02, 1.0972e-01, -5.2912e-03
}
,
{
-1.3812e-02, -4.6271e-02, 7.3790e-02,
-6.3801e-02, -3.6817e-01, -1.7880e-02,
5.2986e-02, 1.8626e-01, 1.5645e-03,
1.2367e-02, -6.2923e-02, 3.0844e-02,
9.3623e-02, 1.9527e-01, -2.6366e-02,
-2.0837e-02, -3.4424e-02, 4.0256e-02,
4.1482e-02, 6.1795e-02, -1.1293e-02,
-8.9944e-02, -1.3608e-01, 1.8067e-02,
3.6974e-02, 5.2530e-03, -2.7474e-02,
1.1872e-05, 1.9000e-05, 2.0729e-05,
1.0139e-05, 1.6832e-05, 1.9392e-05,
6.5445e-06, 1.0973e-05, 1.3521e-05,
-5.3340e-02, 1.3108e-03, 4.0436e-02,
5.7068e-02, -2.7923e-02, -5.4781e-02,
-2.9293e-02, 2.7145e-02, 2.7340e-02,
5.3520e-03, 1.8766e-02, 4.0297e-01,
2.6473e-02, -3.4675e-02, -1.1783e-01,
-2.5038e-02, -1.7702e-02, -3.4908e-02,
1.4847e-02, 2.3237e-01, -6.3687e-02,
-6.5672e-02, -2.1888e-01, -1.7233e-02,
4.0608e-02, -6.9580e-02, -2.2200e-02,
5.8163e-02, 1.3695e-01, -2.6257e-02,
-1.3328e-01, -3.5730e-01, 2.4507e-02,
-4.5611e-03, 2.0424e-01, -3.9821e-02,
5.5300e-02, -1.6006e-01, 1.1717e-01,
-2.6107e-02, -8.6995e-02, 8.3720e-02,
7.5494e-02, 3.2189e-01, 1.5527e-01,
-6.6869e-02, 1.4469e-01, 5.1805e-02,
9.8760e-02, -1.6759e-01, -1.2350e-01,
5.7005e-02, 8.4904e-02, 8.9713e-02,
-1.4263e-02, 2.8914e-02, 3.2239e-02,
-2.4871e-02, 5.6014e-02, -4.4469e-02,
3.1209e-02, 1.3677e-02, -2.1052e-02,
-1.6548e-03, -1.8796e-03, -1.9883e-03,
-1.6186e-03, -1.8494e-03, -1.9670e-03,
-1.5841e-03, -1.8173e-03, -1.9345e-03,
3.5726e-02, 1.8013e-01, 1.6913e-02,
-1.2168e-01, -6.3848e-02, 3.0555e-02,
3.0269e-02, -1.0260e-01, -1.5259e-02,
-4.7375e-03, 5.5115e-02, 6.2642e-01,
9.9776e-03, -2.1988e-01, -2.0984e-01,
7.0470e-03, 6.3178e-02, -1.3607e-02,
1.1918e-01, -2.4081e-01, 1.7889e-01,
-1.0514e-01, 2.9220e-01, -1.3263e-01,
5.6091e-03, -4.1623e-02, 2.5589e-02,
-1.8496e-01, 2.7698e-02, -6.5768e-02,
2.9677e-01, 4.4163e-02, 5.8530e-02,
-1.1010e-01, -7.6787e-02, 3.9844e-02,
5.2113e-03, -1.8202e-02, 1.4129e-03,
-6.1402e-03, -2.7222e-01, 7.4690e-02,
1.9131e-02, 2.2753e-01, 1.9587e-02,
-2.7391e-02, 6.7917e-03, 2.0496e-03,
6.7333e-02, 7.8262e-02, 2.1110e-03,
-5.4519e-02, 3.0763e-02, 1.5628e-02,
9.5055e-02, 3.8855e-02, 1.2446e-02,
-1.5152e-01, 7.8124e-02, -1.2616e-02,
9.3100e-03, -1.6528e-02, -1.2873e-02,
-1.8377e-03, -1.9231e-03, -1.8930e-03,
-1.8058e-03, -1.8841e-03, -1.8678e-03,
-1.7387e-03, -1.7966e-03, -1.7781e-03,
-4.5122e-02, 1.7027e-03, -3.5534e-03,
8.5222e-03, 1.0130e-01, 4.7893e-02,
6.5574e-02, 7.2150e-03, -2.1820e-03,
-5.5105e-03, -1.8990e-01, 2.6527e-02,
6.6140e-03, 2.1537e-01, -2.2183e-02,
-8.0628e-03, 6.8398e-03, 9.4474e-03,
1.2239e-01, -1.3337e-01, 7.3391e-02,
-1.2205e-01, 1.3145e-01, -2.0063e-02,
2.2168e-02, 3.6097e-03, 2.7146e-02,
4.6717e-02, 2.1122e-02, 1.5491e-02,
-1.3077e-01, 1.1635e-01, 1.0849e-02,
8.0113e-02, -8.4028e-02, 1.2863e-03,
-2.9796e-02, -8.4537e-02, -2.6766e-03,
-7.7771e-03, -2.4274e-03, 8.6274e-02,
-2.0354e-02, 4.1245e-02, 8.4227e-02,
5.5894e-02, 1.0706e-01, 5.2965e-02,
-7.8731e-03, 5.5825e-01, 1.0373e-01,
-1.1975e-01, -2.0071e-02, -2.5286e-02,
-7.7477e-02, 5.3589e-02, -1.5710e-03,
-1.2753e-01, 2.5166e-01, 8.2205e-03,
-9.8349e-02, -4.9539e-02, -5.4941e-02,
-4.9916e-03, -4.9986e-03, -5.0660e-03,
-4.9770e-03, -4.9840e-03, -5.0543e-03,
-4.9997e-03, -5.0114e-03, -5.0809e-03,
6.1819e-02, 1.5061e-01, 1.1984e-02,
1.2905e-01, 2.5921e-01, 1.4768e-01,
4.5548e-02, 1.4902e-01, -4.8961e-03,
-1.3605e-02, 8.2896e-02, -4.1931e-01,
-2.2657e-02, 2.4768e-01, 2.6528e-01,
-1.1566e-02, -8.7819e-03, 4.3618e-02,
-3.4332e-02, -1.8392e-01, 4.4471e-02,
-3.7073e-02, -5.4620e-02, 1.0899e-01,
3.7891e-02, 9.9487e-02, 3.2383e-02,
-6.3628e-02, -5.0303e-03, 5.4617e-02,
-8.7802e-02, 2.1977e-01, -6.0249e-03,
6.3554e-02, -5.4291e-02, -2.6709e-02,
-1.5505e-02, -6.7104e-02, 3.8607e-02,
-1.1427e-01, -3.2524e-01, 4.0077e-02,
-6.5144e-03, 1.2313e-01, -2.7924e-02,
1.4265e-02, -3.8338e-02, 8.6780e-02,
1.5341e-01, 1.2174e-01, -7.3160e-02,
2.6326e-04, 7.3690e-02, 5.2187e-02,
-3.3114e-02, -3.6588e-02, 1.1635e-02,
-3.3521e-02, 1.0767e-01, -8.9125e-03,
-2.2431e-02, -4.5655e-03, 7.5531e-03,
6.7227e-04, 7.2856e-04, 7.3907e-04,
6.5335e-04, 7.0702e-04, 7.1233e-04,
6.1540e-04, 6.7286e-04, 6.7797e-04,
-3.1496e-02, 6.0514e-02, 4.2013e-02,
-2.8617e-02, 1.4846e-02, 4.0016e-03,
4.7006e-03, -4.0017e-02, -3.0411e-02,
-9.6037e-03, 8.8522e-02, 9.8616e-02,
4.1297e-02, -3.2645e-01, -7.6144e-03,
-1.0711e-02, 3.9324e-02, 4.0144e-02,
5.2899e-02, -7.8668e-02, -5.4798e-02,
-2.0428e-01, 5.7238e-02, -3.6937e-02,
-3.6103e-02, -8.2683e-02, -2.8101e-02,
8.2479e-02, 5.7766e-02, -1.2019e-01,
-3.8373e-01, 6.8272e-02, -1.1758e-02,
5.1129e-02, -2.7931e-01, 4.5608e-02,
-2.5151e-02, -5.0816e-02, 1.7231e-02,
-3.6376e-02, 1.5916e-01, 2.9192e-02,
-4.1947e-02, 5.3183e-02, -9.7289e-02,
4.6138e-02, 7.0842e-02, 1.6673e-02,
-1.7243e-03, 2.7203e-01, 3.8262e-02,
-1.4000e-01, -7.3793e-02, -2.0050e-02,
-1.8750e-02, -8.5319e-02, -3.0858e-02,
-5.9981e-02, 1.2729e-01, 1.4094e-02,
-5.4088e-02, -2.3694e-02, -9.7485e-03,
-4.7840e-03, -4.8359e-03, -4.8727e-03,
-4.7882e-03, -4.8380e-03, -4.8755e-03,
-4.7859e-03, -4.8321e-03, -4.8633e-03,
4.9511e-02, 1.0935e-01, -3.7430e-03,
1.1834e-01, 7.7243e-02, 4.3074e-02,
6.7446e-02, 2.9734e-02, -1.1276e-02,
-2.0080e-02, 1.3561e-01, -1.3455e-01,
-1.4505e-02, 2.2100e-01, 4.9635e-02,
-1.0040e-02, 3.4560e-02, -7.4607e-03,
-6.8873e-02, -5.6221e-02, 1.2255e-02,
-2.9198e-02, 7.1612e-02, 2.9402e-02,
4.1036e-02, 4.6417e-02, 6.0284e-03,
-6.5261e-02, 2.1426e-03, 2.4192e-02,
-1.6073e-03, -6.2222e-03, -1.8295e-02,
2.4952e-04, -2.0623e-02, -3.3064e-03,
5.9188e-02, -4.8839e-02, 7.9840e-02,
-6.7952e-02, -4.7191e-01, 1.5117e-01,
1.5668e-01, 2.4733e-01, 1.1354e-01,
1.7742e-02, -4.4059e-02, 9.5374e-03,
3.2049e-01, -1.3779e-01, 9.6608e-02,
8.4580e-02, 1.4293e-01, 6.1574e-02,
2.8777e-03, 7.8795e-02, -5.1902e-02,
1.2212e-01, 1.0321e-01, 3.2360e-02,
-9.6617e-02, 7.8941e-03, -7.0876e-02,
3.5869e-03, 3.5891e-03, 3.5923e-03,
3.5746e-03, 3.5840e-03, 3.5967e-03,
3.5785e-03, 3.5932e-03, 3.6080e-03,
1.5454e-03, 3.0582e-03, 4.3737e-02,
-5.9833e-02, -1.1247e-01, 4.4380e-02,
-1.3206e-01, 8.2778e-03, 4.7963e-02,
-4.3720e-02, -7.5722e-03, 2.0510e-01,
3.0133e-02, -4.0506e-01, 2.7867e-01,
5.5586e-02, 2.8926e-02, 1.3360e-03,
1.9490e-05, 3.3326e-01, -7.7241e-02,
-1.5648e-01, 1.5195e-01, -1.3995e-01,
8.6519e-02, 1.0447e-01, -4.1413e-02,
-3.8667e-03, 1.6159e-01, 1.1627e-01,
-2.2646e-01, -3.4758e-02, -6.7956e-03,
-3.2689e-01, 1.9606e-01, -9.1523e-02,
1.1238e-02, 1.5084e-03, 4.2113e-02,
-1.1154e-02, -3.6596e-01, -7.2252e-02,
6.6621e-02, 1.0188e-01, 4.1032e-01,
3.5892e-02, -4.8304e-02, 6.6142e-03,
1.3374e-01, 2.2720e-01, -7.1224e-02,
6.8952e-02, 2.0467e-01, 5.0251e-02,
-6.2016e-02, 2.2175e-01, -1.7764e-02,
2.7542e-02, 1.4905e-01, 3.6637e-02,
-7.2231e-02, 5.0271e-03, -7.1823e-02,
3.5760e-03, 3.5540e-03, 3.5692e-03,
3.5664e-03, 3.5490e-03, 3.5689e-03,
3.5671e-03, 3.5619e-03, 3.5864e-03,
2.7470e-02, -3.9752e-02, 4.1063e-02,
-2.4985e-02, -1.7969e-01, 8.2186e-02,
-5.4251e-02, -5.9651e-03, 2.5079e-02,
-2.1197e-02, 2.5426e-02, 1.3585e-01,
-1.3460e-02, -1.1377e-01, 1.2278e-01,
3.6533e-02, 1.2843e-02, 5.6219e-02,
5.8141e-04, 2.8354e-01, -6.2016e-02,
-1.0289e-01, 1.8724e-01, -9.9475e-02,
5.1193e-02, 7.5986e-02, -1.2951e-03,
-8.2587e-02, 1.8498e-01, 1.0891e-01,
1.3538e-01, -4.7728e-01, 1.0868e-01,
-8.6415e-02, -1.7061e-01, 1.0457e-02
}
};
static __device__ __constant__ const float HDNL3biasL[8][8] =
{
{
-0.1175, -0.0258, -0.0053, -0.0437, -0.0563, -0.1047, -0.3449, 0.0568
}
,
{
0.0339, -0.1738, 0.0061, 0.1565, -0.0316, -0.0016, -0.0032, -0.0554
}
,
{
-0.0508, -0.0609, 0.0347, -0.0802, -0.0438, 0.2512, -0.0491, -0.0259
}
,
{
0.0655, 0.0255, 0.0228, -0.0027, -0.0155, -0.0163, -0.0174, -0.1095
}
,
{
4.9947e-03, 5.3372e-03, -4.5286e-09, -1.3756e-03, 3.8858e-03, -4.4197e-02, 3.3970e-02, 2.8411e-02
}
,
{
-0.0396, 0.0007, 0.1735, 0.0109, 0.1177, 0.0919, 0.0567, -0.0005
}
,
{
0.0127, -0.0688, 0.1102, -0.0052, 0.1602, -0.0191, -0.0322, 0.0311
}
,
{
0.0063, 0.0093, 0.0729, 0.3734, 0.0006, 0.1915, 0.3186, 0.2636
}
};
static __device__ __constant__ const float HDNL3kernelsL10[4 * 8] =
{
-0.0967, -0.3094,
0.3537, 0.5705,
0.2547, 0.3360,
-0.0718, -0.0700,
-0.3013, -0.1602,
0.4520, 0.0495,
0.1564, 0.3773,
-0.0216, 0.4367,
-0.4855, -0.1972,
-0.2026, -0.4390,
0.3743, -0.1156,
0.4408, -0.3123,
-0.3577, 0.0753,
-0.3396, 0.0336,
0.1052, -0.4180,
0.0799, -0.3587
};
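// Layout note: the 32 floats above form eight groups of four, one group per
// input channel. convTranspose8To1HDNL3 below indexes them as
// HDNL3kernelsL10[4 * k + index] with index = (y & 1) * 2 + (x & 1), i.e.
// each group holds the four 2x2 sub-pixel deconvolution taps for channel k.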
__global__ static void conv1To8HDNL0(
cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 0)),
RELU(CHANNEL1TO8(1, 0)),
RELU(CHANNEL1TO8(2, 0)),
RELU(CHANNEL1TO8(3, 0))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 0)),
RELU(CHANNEL1TO8(5, 0)),
RELU(CHANNEL1TO8(6, 0)),
RELU(CHANNEL1TO8(7, 0))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
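// Note: CHANNEL1TO8(n, Level) is defined earlier in this file. Judging from
// the nine taps gathered above, it presumably expands to the 3x3 dot product
// of tl..br against kernel n of the HDNL{Level} layer-1 weights plus the
// matching bias, producing feature channel n from the single luma input.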
__global__ static void conv1To8HDNL1(
cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 1)),
RELU(CHANNEL1TO8(1, 1)),
RELU(CHANNEL1TO8(2, 1)),
RELU(CHANNEL1TO8(3, 1))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 1)),
RELU(CHANNEL1TO8(5, 1)),
RELU(CHANNEL1TO8(6, 1)),
RELU(CHANNEL1TO8(7, 1))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv1To8HDNL2(
cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 2)),
RELU(CHANNEL1TO8(1, 2)),
RELU(CHANNEL1TO8(2, 2)),
RELU(CHANNEL1TO8(3, 2))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 2)),
RELU(CHANNEL1TO8(5, 2)),
RELU(CHANNEL1TO8(6, 2)),
RELU(CHANNEL1TO8(7, 2))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv1To8HDNL3(
cudaTextureObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float tl = tex2D<float>(srcImg, x - 1, y - 1);
float tc = tex2D<float>(srcImg, x, y - 1);
float tr = tex2D<float>(srcImg, x + 1, y - 1);
float ml = tex2D<float>(srcImg, x - 1, y);
float mc = tex2D<float>(srcImg, x, y);
float mr = tex2D<float>(srcImg, x + 1, y);
float bl = tex2D<float>(srcImg, x - 1, y + 1);
float bc = tex2D<float>(srcImg, x, y + 1);
float br = tex2D<float>(srcImg, x + 1, y + 1);
float4 c1234 = make_float4(
RELU(CHANNEL1TO8(0, 3)),
RELU(CHANNEL1TO8(1, 3)),
RELU(CHANNEL1TO8(2, 3)),
RELU(CHANNEL1TO8(3, 3))
);
float4 c5678 = make_float4(
RELU(CHANNEL1TO8(4, 3)),
RELU(CHANNEL1TO8(5, 3)),
RELU(CHANNEL1TO8(6, 3)),
RELU(CHANNEL1TO8(7, 3))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv8To8HDNL0(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, cudaBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 0)),
RELU(CHANNEL8TO8(1, 0)),
RELU(CHANNEL8TO8(2, 0)),
RELU(CHANNEL8TO8(3, 0))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 0)),
RELU(CHANNEL8TO8(5, 0)),
RELU(CHANNEL8TO8(6, 0)),
RELU(CHANNEL8TO8(7, 0))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
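// Note: CHANNEL8TO8(n, layer) is likewise defined earlier. The 18 float4
// neighborhood reads above (nine positions x two surface layers) carry the
// 8 feature channels per pixel, so each output channel n is presumably a
// 3x3x8 convolution over them plus a bias, with L selecting the layer's
// weight slice inside the macro.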
__global__ static void conv8To8HDNL1(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, cudaBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 1)),
RELU(CHANNEL8TO8(1, 1)),
RELU(CHANNEL8TO8(2, 1)),
RELU(CHANNEL8TO8(3, 1))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 1)),
RELU(CHANNEL8TO8(5, 1)),
RELU(CHANNEL8TO8(6, 1)),
RELU(CHANNEL8TO8(7, 1))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv8To8HDNL2(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, cudaBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 2)),
RELU(CHANNEL8TO8(1, 2)),
RELU(CHANNEL8TO8(2, 2)),
RELU(CHANNEL8TO8(3, 2))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 2)),
RELU(CHANNEL8TO8(5, 2)),
RELU(CHANNEL8TO8(6, 2)),
RELU(CHANNEL8TO8(7, 2))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void conv8To8HDNL3(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H, int L)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
float4 tl1, tc1, tr1, ml1, mc1, mr1, bl1, bc1, br1;
float4 tl2, tc2, tr2, ml2, mc2, mr2, bl2, bc2, br2;
surf2DLayeredread(&tl1, srcImg, __umul24(sizeof(mc1), x - 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tc1, srcImg, __umul24(sizeof(mc1), x), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tr1, srcImg, __umul24(sizeof(mc1), x + 1), y - 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&ml1, srcImg, __umul24(sizeof(mc1), x - 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mr1, srcImg, __umul24(sizeof(mc1), x + 1), y, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bl1, srcImg, __umul24(sizeof(mc1), x - 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&bc1, srcImg, __umul24(sizeof(mc1), x), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&br1, srcImg, __umul24(sizeof(mc1), x + 1), y + 1, 0, cudaBoundaryModeZero);
surf2DLayeredread(&tl2, srcImg, __umul24(sizeof(mc2), x - 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tc2, srcImg, __umul24(sizeof(mc2), x), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&tr2, srcImg, __umul24(sizeof(mc2), x + 1), y - 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&ml2, srcImg, __umul24(sizeof(mc2), x - 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), x), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&mr2, srcImg, __umul24(sizeof(mc2), x + 1), y, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bl2, srcImg, __umul24(sizeof(mc2), x - 1), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&bc2, srcImg, __umul24(sizeof(mc2), x), y + 1, 1, cudaBoundaryModeZero);
surf2DLayeredread(&br2, srcImg, __umul24(sizeof(mc2), x + 1), y + 1, 1, cudaBoundaryModeZero);
float4 c1234 = make_float4(
RELU(CHANNEL8TO8(0, 3)),
RELU(CHANNEL8TO8(1, 3)),
RELU(CHANNEL8TO8(2, 3)),
RELU(CHANNEL8TO8(3, 3))
);
float4 c5678 = make_float4(
RELU(CHANNEL8TO8(4, 3)),
RELU(CHANNEL8TO8(5, 3)),
RELU(CHANNEL8TO8(6, 3)),
RELU(CHANNEL8TO8(7, 3))
);
surf2DLayeredwrite(c1234, dstImg, __umul24(sizeof(c1234), x), y, 0, cudaBoundaryModeZero);
surf2DLayeredwrite(c5678, dstImg, __umul24(sizeof(c5678), x), y, 1, cudaBoundaryModeZero);
}
__global__ static void convTranspose8To1HDNL0(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL0kernelsL10[0 + index] +
mc1.y * HDNL0kernelsL10[4 + index] +
mc1.z * HDNL0kernelsL10[8 + index] +
mc1.w * HDNL0kernelsL10[12 + index] +
mc2.x * HDNL0kernelsL10[16 + index] +
mc2.y * HDNL0kernelsL10[20 + index] +
mc2.z * HDNL0kernelsL10[24 + index] +
mc2.w * HDNL0kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
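// The 2x upscale behaves like a stride-2 transposed convolution (pixel
// shuffle): each output pixel (x, y) maps back to source pixel (x/2, y/2),
// and index = (y & 1) * 2 + (x & 1) selects which of the four sub-pixel taps
// applies per channel; the weighted sum is clamped to [0, 1] and quantized
// to 8 bits via the * 255.0f + 0.5f rounding.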
__global__ static void convTranspose8To1HDNL1(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL1kernelsL10[0 + index] +
mc1.y * HDNL1kernelsL10[4 + index] +
mc1.z * HDNL1kernelsL10[8 + index] +
mc1.w * HDNL1kernelsL10[12 + index] +
mc2.x * HDNL1kernelsL10[16 + index] +
mc2.y * HDNL1kernelsL10[20 + index] +
mc2.z * HDNL1kernelsL10[24 + index] +
mc2.w * HDNL1kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
__global__ static void convTranspose8To1HDNL2(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL2kernelsL10[0 + index] +
mc1.y * HDNL2kernelsL10[4 + index] +
mc1.z * HDNL2kernelsL10[8 + index] +
mc1.w * HDNL2kernelsL10[12 + index] +
mc2.x * HDNL2kernelsL10[16 + index] +
mc2.y * HDNL2kernelsL10[20 + index] +
mc2.z * HDNL2kernelsL10[24 + index] +
mc2.w * HDNL2kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
__global__ static void convTranspose8To1HDNL3(
cudaSurfaceObject_t srcImg, cudaSurfaceObject_t dstImg,
int W, int H)
{
const unsigned int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
const unsigned int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
if (x >= W || y >= H)
return;
int index = (y & 1) * 2 + (x & 1);
float4 mc1, mc2;
const unsigned int srcX = x / 2, srcY = y / 2;
surf2DLayeredread(&mc1, srcImg, __umul24(sizeof(mc1), srcX), srcY, 0, cudaBoundaryModeZero);
surf2DLayeredread(&mc2, srcImg, __umul24(sizeof(mc2), srcX), srcY, 1, cudaBoundaryModeZero);
uchar c = clamp(
mc1.x * HDNL3kernelsL10[0 + index] +
mc1.y * HDNL3kernelsL10[4 + index] +
mc1.z * HDNL3kernelsL10[8 + index] +
mc1.w * HDNL3kernelsL10[12 + index] +
mc2.x * HDNL3kernelsL10[16 + index] +
mc2.y * HDNL3kernelsL10[20 + index] +
mc2.z * HDNL3kernelsL10[24 + index] +
mc2.w * HDNL3kernelsL10[28 + index], 0.0f, 1.0f) * 255.0f + 0.5f;
surf2Dwrite(c, dstImg, __umul24(sizeof(c), x), y, cudaBoundaryModeZero);
}
void cuRunKernelACNet(const unsigned char* inputData, unsigned char* outputData, ACCudaParamACNet * param)
{
cudaError_t err = cudaSuccess;
cudaChannelFormatDesc inoutChannelDesc = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
cudaChannelFormatDesc tmpChannelDesc = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);
cudaExtent extent = make_cudaExtent(param->orgW, param->orgH, 2);
const size_t W = 2 * param->orgW, H = 2 * param->orgH;
cudaArray_t cuInputArray;
err = cudaMallocArray(&cuInputArray, &inoutChannelDesc,
param->orgW, param->orgH);
CheckCudaErr(err);
cudaArray_t cuArray1;
err = cudaMalloc3DArray(&cuArray1, &tmpChannelDesc, extent,
cudaArraySurfaceLoadStore | cudaArrayLayered);
CheckCudaErr(err);
cudaArray_t cuArray2;
err = cudaMalloc3DArray(&cuArray2, &tmpChannelDesc, extent,
cudaArraySurfaceLoadStore | cudaArrayLayered);
CheckCudaErr(err);
cudaArray_t cuOutputArray;
err = cudaMallocArray(&cuOutputArray, &inoutChannelDesc,
W, H, cudaArraySurfaceLoadStore);
CheckCudaErr(err);
struct cudaResourceDesc resDesc;
struct cudaTextureDesc texDesc;
memset(&resDesc, 0, sizeof(resDesc));
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
texDesc.readMode = cudaReadModeNormalizedFloat;
texDesc.normalizedCoords = 0;
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuInputArray;
cudaTextureObject_t inTex = 0;
err = cudaCreateTextureObject(&inTex, &resDesc, &texDesc, NULL);
CheckCudaErr(err);
resDesc.res.array.array = cuArray1;
cudaSurfaceObject_t surf1 = 0;
err = cudaCreateSurfaceObject(&surf1, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuArray2;
cudaSurfaceObject_t surf2 = 0;
err = cudaCreateSurfaceObject(&surf2, &resDesc);
CheckCudaErr(err);
resDesc.res.array.array = cuOutputArray;
cudaSurfaceObject_t outSurf = 0;
err = cudaCreateSurfaceObject(&outSurf, &resDesc);
CheckCudaErr(err);
err = cudaMemcpy2DToArray(cuInputArray, 0, 0, inputData,
param->orgW, param->orgW, param->orgH,
cudaMemcpyHostToDevice);
CheckCudaErr(err);
dim3 dimBlock(16, 16);
dim3 dimGrid(
(param->orgW + dimBlock.x - 1) / dimBlock.x,
(param->orgH + dimBlock.y - 1) / dimBlock.y
);
dim3 dimGridout(
(param->orgW * 2 + dimBlock.x - 1) / dimBlock.x,
(param->orgH * 2 + dimBlock.y - 1) / dimBlock.y
);
switch (param->HDNLevel)
{
case 0:
RUNKERNEL(0)
break;
case 1:
RUNKERNEL(1)
break;
case 2:
RUNKERNEL(2)
break;
case 3:
RUNKERNEL(3)
break;
default:
RUNKERNEL(0)
break;
}
err = cudaMemcpy2DFromArray(outputData, param->orgW * 2,
cuOutputArray, 0, 0, W, H,
cudaMemcpyDeviceToHost);
CheckCudaErr(err);
cudaDestroyTextureObject(inTex);
cudaDestroySurfaceObject(surf1);
cudaDestroySurfaceObject(surf2);
cudaDestroySurfaceObject(outSurf);
cudaFreeArray(cuInputArray);
cudaFreeArray(cuArray1);
cudaFreeArray(cuArray2);
cudaFreeArray(cuOutputArray);
}
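// Usage sketch (hypothetical values; assumes inputData is a tightly packed
// orgW x orgH single-channel 8-bit plane and outputData has room for the
// 2x-upscaled result):
//
// ACCudaParamACNet param;
// param.orgW = 1920;
// param.orgH = 1080;
// param.HDNLevel = 1; // selects the HDNL1 weight set
// cuRunKernelACNet(inLuma, outLuma, &param);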
|
a73974292ee26ab7ac5d4edf4686cd1e442d06df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/imageProcessing/convolution.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/time/timer.h"
using std::cout;
using std::endl;
namespace Saiga {
namespace CUDA {
__constant__ float d_Kernel[MAX_RADIUS*2+1];
#define KERNEL_RADIUS 4
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 128
#define ROWS_BLOCKDIM_Y 1
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1
__global__ void convolutionRowsKernel2(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Load main data
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < ROWS_HALO_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Load right halo
#pragma unroll
for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += d_Kernel[KERNEL_RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
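// convolutionRowsKernel2 (and convolutionColumnsKernel2 further down) follow
// the layout of NVIDIA's convolutionSeparable sample and are kept as
// reference implementations; the host wrappers below launch the templated,
// vectorized variants instead.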
template<typename T, int RADIUS, int BLOCK_W, int BLOCK_H>
__global__ void convolutionRowsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
using vector_type = int2;
const int elements_per_vector = sizeof(vector_type) / sizeof(T);
const int shared_block_width = BLOCK_W + ( KERNEL_RADIUS * 2 / elements_per_vector);
//estimated shared memory per block = BLOCK_H * (BLOCK_W + KERNEL_RADIUS*2/2) * sizeof(int2) = 1 * 132 * 8 = 1056 bytes
//per mp: 1056 * (2048 threads / 128 threads per block) = 1056 * 16 = 16896 bytes, well below the 48KB limit, so shared memory does not restrict occupancy
__shared__ vector_type s_Data[BLOCK_H*shared_block_width];
vector_type* src2 = reinterpret_cast<vector_type*>(d_Src);
vector_type* dest2 = reinterpret_cast<vector_type*>(d_Dst);
T* s_Data2 = reinterpret_cast<T*>(s_Data);
int imageWV = imageW / elements_per_vector;
int pitchV = pitch / elements_per_vector;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*BLOCK_W + tx;
const int yp = blockIdx.y*BLOCK_H + ty;
// const int baseX = blockIdx.x*BLOCK_W - KERNEL_RADIUS;
const int baseXV = blockIdx.x * BLOCK_W - KERNEL_RADIUS / elements_per_vector;
for (int i = threadIdx.x; i < BLOCK_W + (KERNEL_RADIUS * 2 / elements_per_vector); i+=BLOCK_W)
{
int x = baseXV + i;
x = min(max(0,x),imageWV-1);
auto v = src2[yp * pitchV + x];
if(baseXV + i < 0){
v.y = v.x;
}
if(baseXV + i >= imageWV){
v.x = v.y;
}
s_Data[threadIdx.y*shared_block_width+i] = v;
}
//Compute and store results
__syncthreads();
T sum[2];
sum[0] = 0;
sum[1] = 0;
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
auto xoffset = threadIdx.x * elements_per_vector;
auto yoffset = threadIdx.y * shared_block_width * elements_per_vector;
sum[0] += d_Kernel[KERNEL_RADIUS + j] * s_Data2[yoffset + xoffset + (KERNEL_RADIUS + j)];
sum[1] += d_Kernel[KERNEL_RADIUS + j] * s_Data2[yoffset + xoffset + (KERNEL_RADIUS + j) + 1];
}
dest2[yp * pitchV + xp] = *reinterpret_cast<vector_type*>(sum);
}
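// The int2 vector type makes each thread load and store two adjacent floats
// per transaction; the v.y = v.x / v.x = v.y fix-ups above replicate the edge
// sample inside a vector, so halo loads that fall outside the image clamp to
// the border value at float granularity instead of pulling in a neighbor.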
extern "C" void convolutionRowsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH
)
{
assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS);
assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0);
assert(imageH % ROWS_BLOCKDIM_Y == 0);
dim3 blocks(imageW / (ROWS_BLOCKDIM_X) / 2, imageH / ROWS_BLOCKDIM_Y);
// dim3 blocks(16,1);
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
hipLaunchKernelGGL(( convolutionRowsKernel<float,KERNEL_RADIUS,ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y>)
, dim3(blocks), dim3(threads), 0, 0,
d_Dst,
d_Src,
imageW,
imageH,
imageW
);
CUDA_SYNC_CHECK_ERROR();
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
#define COLUMNS_BLOCKDIM_X 4
#define COLUMNS_BLOCKDIM_Y 64
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
__global__ void convolutionColumnsKernel2(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += d_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
template<typename T, int RADIUS, int BLOCK_W, int BLOCK_H>
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
using vector_type = int2;
const int elements_per_vector = sizeof(vector_type) / sizeof(T);
const int shared_block_width = BLOCK_H + ( KERNEL_RADIUS * 2 );
//estimated shared memory per block = BLOCK_W * (BLOCK_H + KERNEL_RADIUS*2) * sizeof(int2) = 4 * 72 * 8 = 2304 bytes
//per mp: 2304 * (2048 threads / 256 threads per block) = 2304 * 8 = 18432 bytes, well below the 48KB limit, so shared memory does not restrict occupancy
__shared__ vector_type s_Data[BLOCK_W*shared_block_width];
vector_type* src2 = reinterpret_cast<vector_type*>(d_Src);
vector_type* dest2 = reinterpret_cast<vector_type*>(d_Dst);
// T* s_Data2 = reinterpret_cast<T*>(s_Data);
// int imageWV = imageW / elements_per_vector;
int pitchV = pitch / elements_per_vector;
// const int tx = threadIdx.x;
// const int ty = threadIdx.y;
const int tx = threadIdx.y;
const int ty = threadIdx.x;
const int xp = blockIdx.x*BLOCK_W + tx;
const int yp = blockIdx.y*BLOCK_H + ty;
// const int baseX = blockIdx.x*BLOCK_W - KERNEL_RADIUS;
// const int baseXV = blockIdx.x * BLOCK_W - KERNEL_RADIUS / elements_per_vector;
const int baseY = blockIdx.y*BLOCK_H - KERNEL_RADIUS;
for (int i = ty; i < BLOCK_H + (KERNEL_RADIUS * 2); i+=BLOCK_H)
{
int y = baseY + i;
y = min(max(0,y),imageH-1);
s_Data[tx*shared_block_width + i] = src2[y * pitchV + xp];
// vector_type v;
// v.x = 1;
// v.y = 1;
// s_Data[tx*shared_block_width + i] = v;
// if(blockIdx.x == 0 && blockIdx.y == 0){
// printf("%d %d %d %d \n",threadIdx.x,threadIdx.y,y * pitchV + xp,threadIdx.x*shared_block_width + i);
// }
}
//Compute and store results
__syncthreads();
T sum[2];
sum[0] = 0;
sum[1] = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
// for (int j = -0; j <= 0; j++)
{
int i = j + KERNEL_RADIUS;
// i = (i + threadIdx.x) % (KERNEL_RADIUS*2+1);
auto xoffset = tx * shared_block_width;
auto yoffset = ty;
auto v = s_Data[yoffset + xoffset + i];
// auto v = s_Data2[ (yoffset + xoffset + i) * elements_per_vector];
// auto v2 = s_Data2[ (yoffset + xoffset + i) * elements_per_vector + 1];
// auto v = s_Data2[0];
// auto v2 = s_Data2[0];
float k = d_Kernel[i];
// float k = i;
sum[0] += k * reinterpret_cast<T*>(&v)[0];
sum[1] += k * reinterpret_cast<T*>(&v)[1];
// sum[0] += v;
// sum[1] += v2;
}
dest2[yp * pitchV + xp] = *reinterpret_cast<vector_type*>(sum);
}
extern "C" void convolutionColumnsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH
)
{
assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS);
assert(imageW % COLUMNS_BLOCKDIM_X == 0);
assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0);
dim3 blocks(imageW / COLUMNS_BLOCKDIM_X / 2, imageH / (COLUMNS_BLOCKDIM_Y));
// dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
dim3 threads(COLUMNS_BLOCKDIM_Y, COLUMNS_BLOCKDIM_X);
hipLaunchKernelGGL(( convolutionColumnsKernel<float,KERNEL_RADIUS,COLUMNS_BLOCKDIM_X,COLUMNS_BLOCKDIM_Y>)
, dim3(blocks), dim3(threads), 0, 0,
d_Dst,
d_Src,
imageW,
imageH,
imageW
);
CUDA_SYNC_CHECK_ERROR();
}
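// Note the swapped launch dimensions: threads are passed as
// (COLUMNS_BLOCKDIM_Y, COLUMNS_BLOCKDIM_X) and the kernel compensates by
// reading tx = threadIdx.y and ty = threadIdx.x, so the logical tile is still
// COLUMNS_BLOCKDIM_X vector columns by COLUMNS_BLOCKDIM_Y rows.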
template<typename T, int RADIUS, int BLOCK_W, int BLOCK_H>
__global__ static
void singlePassConvolve(ImageView<T> src, ImageView<T> dst)
{
__shared__ float buffer[(BLOCK_W + 2*RADIUS)*BLOCK_H];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*BLOCK_W + tx;
const int yp = blockIdx.y*BLOCK_H + ty;
float *kernel = d_Kernel;
int nx = min(max(0,xp-RADIUS),src.width-1);
float *buff = buffer + ty*(BLOCK_W + 2*RADIUS);
// int h = src.height-1;
// int pitch = src.pitch;
if (yp<src.height){
float sum = 0;
for (int j=-RADIUS;j<=RADIUS;j++){
int ny = min(max(0,yp+j),src.height-1);
sum += src(nx,ny) * kernel[j+RADIUS];
}
buff[tx] = sum;
}
__syncthreads();
if (tx<BLOCK_W && xp<src.width && yp<src.height) {
float sum = 0;
for (int j=-RADIUS;j<=RADIUS;j++){
int id = tx + j + RADIUS;
sum += buff[id] * kernel[j+RADIUS];
}
dst(xp,yp) = sum;
}
}
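// singlePassConvolve fuses both separable passes: each thread first filters
// its column vertically (with clamp-to-edge indexing) into a shared-memory
// row buffer, then after the barrier filters that buffer horizontally. The
// test below therefore launches BLOCK_W + 2*kernel_radius threads in x so
// the horizontal pass has its halo resident in shared memory.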
void convolutionTest(){
CUDA_SYNC_CHECK_ERROR();
const int kernel_radius = KERNEL_RADIUS;
const int kernel_size = kernel_radius * 2 + 1;
float sigma = 2.0f;
int h = 2048;
int w = h * 2;
size_t N = w * h;
size_t readWrites = N * 2 * sizeof(float);
Saiga::CUDA::PerformanceTestHelper pth("convolutionTest radius: " + std::to_string(kernel_radius), readWrites);
thrust::device_vector<float> src(N,0.1);
thrust::device_vector<float> dest(N,0.1);
thrust::device_vector<float> tmp(N,0.1);
thrust::host_vector<float> h_src = src;
thrust::host_vector<float> h_dest = dest;
thrust::host_vector<float> h_tmp = dest;
thrust::host_vector<float> h_ref = dest;
ImageView<float> imgSrc(w,h,thrust::raw_pointer_cast(src.data()));
ImageView<float> imgDst(w,h,thrust::raw_pointer_cast(dest.data()));
ImageView<float> imgTmp(w,h,thrust::raw_pointer_cast(tmp.data()));
ImageView<float> h_imgSrc(w,h,thrust::raw_pointer_cast(h_src.data()));
ImageView<float> h_imgDst(w,h,thrust::raw_pointer_cast(h_dest.data()));
ImageView<float> h_imgTmp(w,h,thrust::raw_pointer_cast(h_tmp.data()));
thrust::host_vector<float> h_kernel(kernel_size);
float kernelSum = 0.0f;
float ivar2 = 1.0f/(2.0f*sigma*sigma);
for (int j=-kernel_radius;j<=kernel_radius;j++) {
h_kernel[j+kernel_radius] = (float)expf(-(double)j*j*ivar2);
kernelSum += h_kernel[j+kernel_radius];
}
for (int j=-kernel_radius;j<=kernel_radius;j++){
h_kernel[j+kernel_radius] /= kernelSum;
// cout << h_kernel[j+kernel_radius] << endl;
}
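// i.e. h_kernel[j + R] = exp(-j*j / (2*sigma*sigma)) / kernelSum, a discrete
// Gaussian of radius R = 4 at sigma = 2 whose taps sum to 1.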
{
for(int y = 0; y < h; ++y){
for(int x = 0; x < w; ++x){
h_imgSrc(x,y) = (rand()%3) - 1;
}
}
src = h_src;
}
// copyConvolutionKernel(h_kernel);
CHECK_CUDA_ERROR(hipMemcpyToSymbol(d_Kernel, h_kernel.data(), h_kernel.size()*sizeof(float)));
{
float time;
{
Saiga::ScopedTimer<float> t(&time);
for(int y = 0; y < h; ++y){
for(int x = 0; x < w; ++x){
float sum = 0;
for (int j=-kernel_radius;j<=kernel_radius;j++){
int ny = ::min(::max(0,y+j),h-1);
float innerSum = 0;
for (int i=-kernel_radius;i<=kernel_radius;i++){
int nx = ::min(::max(0,x+i),w-1);
innerSum += h_imgSrc(nx,ny) * h_kernel[i+kernel_radius];
}
sum += innerSum * h_kernel[j+kernel_radius];
}
h_imgDst(x,y) = sum;
}
}
}
pth.addMeassurement("CPU Convolve",time);
h_ref = h_dest;
// cout << "h_ref[0]=" << h_ref[0] << endl;
}
{
float time;
{
Saiga::ScopedTimer<float> t(&time);
for(int y = 0; y < h; ++y){
for(int x = 0; x < w; ++x){
float sum = 0;
for (int j=-kernel_radius;j<=kernel_radius;j++){
int nx = ::min(::max(0,x+j),w-1);
sum += h_imgSrc(nx,y) * h_kernel[j+kernel_radius];
}
h_imgTmp(x,y) = sum;
}
}
for(int x = 0; x < w; ++x){
for(int y = 0; y < h; ++y){
float sum = 0;
for (int j=-kernel_radius;j<=kernel_radius;j++){
int ny = ::min(::max(0,y+j),h-1);
sum += h_imgTmp(x,ny) * h_kernel[j+kernel_radius];
}
h_imgDst(x,y) = sum;
}
}
}
pth.addMeassurement("CPU Convolve Separate",time);
SAIGA_ASSERT(h_ref == h_dest);
}
{
dest = src;
float time;
{
Saiga::CUDA::CudaScopedTimer t(time);
const int LOWPASS_W = 32;
const int LOWPASS_H = 16;
dim3 blocks(Saiga::CUDA::getBlockCount(w, LOWPASS_W), Saiga::CUDA::getBlockCount(h, LOWPASS_H));
dim3 threads(LOWPASS_W+2*kernel_radius, LOWPASS_H);
hipLaunchKernelGGL(( singlePassConvolve<float,kernel_radius,LOWPASS_W,LOWPASS_H>) , dim3(blocks), dim3(threads), 0, 0, imgSrc,imgDst);
}
pth.addMeassurement("GPU Convolve Single Pass",time);
thrust::host_vector<float> test = dest;
for(int i = 0; i < test.size();++i){
if(std::abs(test[i]-h_ref[i]) > 1e-5){
cout << "error " << i << " " << test[i] << "!=" << h_ref[i] << endl;
SAIGA_ASSERT(0);
}
}
}
{
thrust::device_vector<float> d_kernel = h_kernel;
dest = src;
float time;
{
Saiga::CUDA::CudaScopedTimer t(time);
convolveSinglePassSeparate(imgSrc,imgDst,d_kernel,4);
}
pth.addMeassurement("GPU Convolve Single Pass2",time);
thrust::host_vector<float> test = dest;
for(int i = 0; i < test.size();++i){
if(std::abs(test[i]-h_ref[i]) > 1e-5){
cout << "error " << i << " " << test[i] << "!=" << h_ref[i] << endl;
SAIGA_ASSERT(0);
}
}
}
{
dest = src;
tmp = src;
float time1;
{
Saiga::CUDA::CudaScopedTimer t(time1);
convolutionRowsGPU((float*)imgTmp.data,(float*)imgSrc.data,w,h);
}
pth.addMeassurement("GPU Convolve Separate Row",time1);
float time2;
{
Saiga::CUDA::CudaScopedTimer t(time2);
convolutionColumnsGPU((float*)imgDst.data,(float*)imgTmp.data,w,h);
}
pth.addMeassurement("GPU Convolve Separate Col",time2);
pth.addMeassurement("GPU Convolve Separate Total",time1+time2);
thrust::host_vector<float> test = dest;
for(int i = 0; i < test.size();++i){
if(std::abs(test[i]-h_ref[i]) > 1e-5){
cout << "error " << i << " " << test[i] << "!=" << h_ref[i] << " " << h_tmp[i] << endl;
SAIGA_ASSERT(0);
}
}
}
{
float time;
{
Saiga::CUDA::CudaScopedTimer t(time);
hipMemcpy(thrust::raw_pointer_cast(dest.data()),thrust::raw_pointer_cast(src.data()),N * sizeof(float),hipMemcpyDeviceToDevice); // buffers hold float, not int
}
pth.addMeassurement("hipMemcpy", time);
}
CUDA_SYNC_CHECK_ERROR();
}
}
}
| a73974292ee26ab7ac5d4edf4686cd1e442d06df.cu | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/imageProcessing/convolution.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/cuda/cudaHelper.h"
#include "saiga/time/timer.h"
using std::cout;
using std::endl;
namespace Saiga {
namespace CUDA {
__constant__ float d_Kernel[MAX_RADIUS*2+1];
#define KERNEL_RADIUS 4
////////////////////////////////////////////////////////////////////////////////
// Row convolution filter
////////////////////////////////////////////////////////////////////////////////
#define ROWS_BLOCKDIM_X 128
#define ROWS_BLOCKDIM_Y 1
#define ROWS_RESULT_STEPS 8
#define ROWS_HALO_STEPS 1
__global__ void convolutionRowsKernel2(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float s_Data[ROWS_BLOCKDIM_Y][(ROWS_RESULT_STEPS + 2 * ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X];
//Offset to the left halo edge
const int baseX = (blockIdx.x * ROWS_RESULT_STEPS - ROWS_HALO_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Load main data
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = d_Src[i * ROWS_BLOCKDIM_X];
}
//Load left halo
#pragma unroll
for (int i = 0; i < ROWS_HALO_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX >= -i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Load right halo
#pragma unroll
for (int i = ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS + ROWS_HALO_STEPS; i++)
{
s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X] = (imageW - baseX > i * ROWS_BLOCKDIM_X) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = ROWS_HALO_STEPS; i < ROWS_HALO_STEPS + ROWS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += d_Kernel[KERNEL_RADIUS + j] * s_Data[threadIdx.y][threadIdx.x + i * ROWS_BLOCKDIM_X + j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
template<typename T, int RADIUS, int BLOCK_W, int BLOCK_H>
__global__ void convolutionRowsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
using vector_type = int2;
const int elements_per_vector = sizeof(vector_type) / sizeof(T);
const int shared_block_width = BLOCK_W + ( KERNEL_RADIUS * 2 / elements_per_vector);
//estimated shared memory per block = BLOCK_H * (BLOCK_W + 2*RADIUS/elements_per_vector) * sizeof(int2) = 1 * 132 * 8 = 1056 bytes
//per mp at up to 2048/128 = 16 resident blocks: 16 * 1056 = 16896 bytes, well below the shared-memory limit
__shared__ vector_type s_Data[BLOCK_H*shared_block_width];
vector_type* src2 = reinterpret_cast<vector_type*>(d_Src);
vector_type* dest2 = reinterpret_cast<vector_type*>(d_Dst);
T* s_Data2 = reinterpret_cast<T*>(s_Data);
int imageWV = imageW / elements_per_vector;
int pitchV = pitch / elements_per_vector;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*BLOCK_W + tx;
const int yp = blockIdx.y*BLOCK_H + ty;
// const int baseX = blockIdx.x*BLOCK_W - KERNEL_RADIUS;
const int baseXV = blockIdx.x * BLOCK_W - KERNEL_RADIUS / elements_per_vector;
for (int i = threadIdx.x; i < BLOCK_W + (KERNEL_RADIUS * 2 / elements_per_vector); i+=BLOCK_W)
{
int x = baseXV + i;
x = min(max(0,x),imageWV-1);
auto v = src2[yp * pitchV + x];
if(baseXV + i < 0){
v.y = v.x;
}
if(baseXV + i >= imageWV){
v.x = v.y;
}
s_Data[threadIdx.y*shared_block_width+i] = v;
}
//Compute and store results
__syncthreads();
T sum[2];
sum[0] = 0;
sum[1] = 0;
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
auto xoffset = threadIdx.x * elements_per_vector;
auto yoffset = threadIdx.y * shared_block_width * elements_per_vector;
sum[0] += d_Kernel[KERNEL_RADIUS + j] * s_Data2[yoffset + xoffset + (KERNEL_RADIUS + j)];
sum[1] += d_Kernel[KERNEL_RADIUS + j] * s_Data2[yoffset + xoffset + (KERNEL_RADIUS + j) + 1];
}
dest2[yp * pitchV + xp] = *reinterpret_cast<vector_type*>(sum);
}
extern "C" void convolutionRowsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH
)
{
assert(ROWS_BLOCKDIM_X * ROWS_HALO_STEPS >= KERNEL_RADIUS);
assert(imageW % (ROWS_RESULT_STEPS * ROWS_BLOCKDIM_X) == 0);
assert(imageH % ROWS_BLOCKDIM_Y == 0);
dim3 blocks(imageW / (ROWS_BLOCKDIM_X) / 2, imageH / ROWS_BLOCKDIM_Y);
// dim3 blocks(16,1);
dim3 threads(ROWS_BLOCKDIM_X, ROWS_BLOCKDIM_Y);
convolutionRowsKernel<float,KERNEL_RADIUS,ROWS_BLOCKDIM_X,ROWS_BLOCKDIM_Y>
<<<blocks, threads>>>(
d_Dst,
d_Src,
imageW,
imageH,
imageW
);
CUDA_SYNC_CHECK_ERROR();
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
////////////////////////////////////////////////////////////////////////////////
#define COLUMNS_BLOCKDIM_X 4
#define COLUMNS_BLOCKDIM_Y 64
#define COLUMNS_RESULT_STEPS 8
#define COLUMNS_HALO_STEPS 1
__global__ void convolutionColumnsKernel2(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
__shared__ float s_Data[COLUMNS_BLOCKDIM_X][(COLUMNS_RESULT_STEPS + 2 * COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + 1];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * COLUMNS_RESULT_STEPS - COLUMNS_HALO_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
d_Src += baseY * pitch + baseX;
d_Dst += baseY * pitch + baseX;
//Main data
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = d_Src[i * COLUMNS_BLOCKDIM_Y * pitch];
}
//Upper halo
#pragma unroll
for (int i = 0; i < COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY >= -i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo
#pragma unroll
for (int i = COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS + COLUMNS_HALO_STEPS; i++)
{
s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y]= (imageH - baseY > i * COLUMNS_BLOCKDIM_Y) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = COLUMNS_HALO_STEPS; i < COLUMNS_HALO_STEPS + COLUMNS_RESULT_STEPS; i++)
{
float sum = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
{
sum += d_Kernel[KERNEL_RADIUS - j] * s_Data[threadIdx.x][threadIdx.y + i * COLUMNS_BLOCKDIM_Y + j];
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
template<typename T, int RADIUS, int BLOCK_W, int BLOCK_H>
__global__ void convolutionColumnsKernel(
float *d_Dst,
float *d_Src,
int imageW,
int imageH,
int pitch
)
{
using vector_type = int2;
const int elements_per_vector = sizeof(vector_type) / sizeof(T);
const int shared_block_width = BLOCK_H + ( KERNEL_RADIUS * 2 );
//estimated shared memory per block = BLOCK_W * (BLOCK_H + 2*RADIUS) * sizeof(int2) = 4 * 72 * 8 = 2304 bytes
//per mp at up to 2048/256 = 8 resident blocks: 8 * 2304 = 18432 bytes, well below the shared-memory limit
__shared__ vector_type s_Data[BLOCK_W*shared_block_width];
vector_type* src2 = reinterpret_cast<vector_type*>(d_Src);
vector_type* dest2 = reinterpret_cast<vector_type*>(d_Dst);
// T* s_Data2 = reinterpret_cast<T*>(s_Data);
// int imageWV = imageW / elements_per_vector;
int pitchV = pitch / elements_per_vector;
// const int tx = threadIdx.x;
// const int ty = threadIdx.y;
const int tx = threadIdx.y;
const int ty = threadIdx.x;
const int xp = blockIdx.x*BLOCK_W + tx;
const int yp = blockIdx.y*BLOCK_H + ty;
// const int baseX = blockIdx.x*BLOCK_W - KERNEL_RADIUS;
// const int baseXV = blockIdx.x * BLOCK_W - KERNEL_RADIUS / elements_per_vector;
const int baseY = blockIdx.y*BLOCK_H - KERNEL_RADIUS;
for (int i = ty; i < BLOCK_H + (KERNEL_RADIUS * 2); i+=BLOCK_H)
{
int y = baseY + i;
y = min(max(0,y),imageH-1);
s_Data[tx*shared_block_width + i] = src2[y * pitchV + xp];
// vector_type v;
// v.x = 1;
// v.y = 1;
// s_Data[tx*shared_block_width + i] = v;
// if(blockIdx.x == 0 && blockIdx.y == 0){
// printf("%d %d %d %d \n",threadIdx.x,threadIdx.y,y * pitchV + xp,threadIdx.x*shared_block_width + i);
// }
}
//Compute and store results
__syncthreads();
T sum[2];
sum[0] = 0;
sum[1] = 0;
#pragma unroll
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
// for (int j = -0; j <= 0; j++)
{
int i = j + KERNEL_RADIUS;
// i = (i + threadIdx.x) % (KERNEL_RADIUS*2+1);
auto xoffset = tx * shared_block_width;
auto yoffset = ty;
auto v = s_Data[yoffset + xoffset + i];
// auto v = s_Data2[ (yoffset + xoffset + i) * elements_per_vector];
// auto v2 = s_Data2[ (yoffset + xoffset + i) * elements_per_vector + 1];
// auto v = s_Data2[0];
// auto v2 = s_Data2[0];
float k = d_Kernel[i];
// float k = i;
sum[0] += k * reinterpret_cast<T*>(&v)[0];
sum[1] += k * reinterpret_cast<T*>(&v)[1];
// sum[0] += v;
// sum[1] += v2;
}
dest2[yp * pitchV + xp] = *reinterpret_cast<vector_type*>(sum);
}
extern "C" void convolutionColumnsGPU(
float *d_Dst,
float *d_Src,
int imageW,
int imageH
)
{
assert(COLUMNS_BLOCKDIM_Y * COLUMNS_HALO_STEPS >= KERNEL_RADIUS);
assert(imageW % COLUMNS_BLOCKDIM_X == 0);
assert(imageH % (COLUMNS_RESULT_STEPS * COLUMNS_BLOCKDIM_Y) == 0);
dim3 blocks(imageW / COLUMNS_BLOCKDIM_X / 2, imageH / (COLUMNS_BLOCKDIM_Y));
// dim3 threads(COLUMNS_BLOCKDIM_X, COLUMNS_BLOCKDIM_Y);
dim3 threads(COLUMNS_BLOCKDIM_Y, COLUMNS_BLOCKDIM_X);
convolutionColumnsKernel<float,KERNEL_RADIUS,COLUMNS_BLOCKDIM_X,COLUMNS_BLOCKDIM_Y>
<<<blocks, threads>>>(
d_Dst,
d_Src,
imageW,
imageH,
imageW
);
CUDA_SYNC_CHECK_ERROR();
}
template<typename T, int RADIUS, int BLOCK_W, int BLOCK_H>
__global__ static
void singlePassConvolve(ImageView<T> src, ImageView<T> dst)
{
__shared__ float buffer[(BLOCK_W + 2*RADIUS)*BLOCK_H];
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int xp = blockIdx.x*BLOCK_W + tx;
const int yp = blockIdx.y*BLOCK_H + ty;
float *kernel = d_Kernel;
int nx = min(max(0,xp-RADIUS),src.width-1);
float *buff = buffer + ty*(BLOCK_W + 2*RADIUS);
// int h = src.height-1;
// int pitch = src.pitch;
if (yp<src.height){
float sum = 0;
for (int j=-RADIUS;j<=RADIUS;j++){
int ny = min(max(0,yp+j),src.height-1);
sum += src(nx,ny) * kernel[j+RADIUS];
}
buff[tx] = sum;
}
__syncthreads();
if (tx<BLOCK_W && xp<src.width && yp<src.height) {
float sum = 0;
for (int j=-RADIUS;j<=RADIUS;j++){
int id = tx + j + RADIUS;
sum += buff[id] * kernel[j+RADIUS];
}
dst(xp,yp) = sum;
}
}
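/*
 * Launch-geometry note: the grid covers BLOCK_W x BLOCK_H output tiles, but blockDim.x
 * is BLOCK_W + 2*RADIUS. Every thread runs the vertical pass to fill the shared row
 * buffer (including the horizontal halo columns); only threads with tx < BLOCK_W then
 * run the horizontal pass and write an output pixel.
 */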
void convolutionTest(){
CUDA_SYNC_CHECK_ERROR();
const int kernel_radius = KERNEL_RADIUS;
const int kernel_size = kernel_radius * 2 + 1;
float sigma = 2.0f;
int h = 2048;
int w = h * 2;
size_t N = w * h;
size_t readWrites = N * 2 * sizeof(float);
Saiga::CUDA::PerformanceTestHelper pth("convolutionTest radius: " + std::to_string(kernel_radius), readWrites);
thrust::device_vector<float> src(N,0.1);
thrust::device_vector<float> dest(N,0.1);
thrust::device_vector<float> tmp(N,0.1);
thrust::host_vector<float> h_src = src;
thrust::host_vector<float> h_dest = dest;
thrust::host_vector<float> h_tmp = dest;
thrust::host_vector<float> h_ref = dest;
ImageView<float> imgSrc(w,h,thrust::raw_pointer_cast(src.data()));
ImageView<float> imgDst(w,h,thrust::raw_pointer_cast(dest.data()));
ImageView<float> imgTmp(w,h,thrust::raw_pointer_cast(tmp.data()));
ImageView<float> h_imgSrc(w,h,thrust::raw_pointer_cast(h_src.data()));
ImageView<float> h_imgDst(w,h,thrust::raw_pointer_cast(h_dest.data()));
ImageView<float> h_imgTmp(w,h,thrust::raw_pointer_cast(h_tmp.data()));
thrust::host_vector<float> h_kernel(kernel_size);
float kernelSum = 0.0f;
float ivar2 = 1.0f/(2.0f*sigma*sigma);
for (int j=-kernel_radius;j<=kernel_radius;j++) {
h_kernel[j+kernel_radius] = (float)expf(-(double)j*j*ivar2);
kernelSum += h_kernel[j+kernel_radius];
}
for (int j=-kernel_radius;j<=kernel_radius;j++){
h_kernel[j+kernel_radius] /= kernelSum;
// cout << h_kernel[j+kernel_radius] << endl;
}
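    // h_kernel is now a normalized 1D Gaussian: k[j] ~ exp(-j^2/(2*sigma^2)) with sum_j k[j] = 1;
    // the 2D filter is applied as the outer product k[i]*k[j] (separable convolution)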
{
for(int y = 0; y < h; ++y){
for(int x = 0; x < w; ++x){
h_imgSrc(x,y) = (rand()%3) - 1;
}
}
src = h_src;
}
// copyConvolutionKernel(h_kernel);
CHECK_CUDA_ERROR(cudaMemcpyToSymbol(d_Kernel, h_kernel.data(), h_kernel.size()*sizeof(float)));
{
float time;
{
Saiga::ScopedTimer<float> t(&time);
for(int y = 0; y < h; ++y){
for(int x = 0; x < w; ++x){
float sum = 0;
for (int j=-kernel_radius;j<=kernel_radius;j++){
int ny = std::min(std::max(0,y+j),h-1);
float innerSum = 0;
for (int i=-kernel_radius;i<=kernel_radius;i++){
int nx = std::min(std::max(0,x+i),w-1);
innerSum += h_imgSrc(nx,ny) * h_kernel[i+kernel_radius];
}
sum += innerSum * h_kernel[j+kernel_radius];
}
h_imgDst(x,y) = sum;
}
}
}
pth.addMeassurement("CPU Convolve",time);
h_ref = h_dest;
// cout << "h_ref[0]=" << h_ref[0] << endl;
}
{
float time;
{
Saiga::ScopedTimer<float> t(&time);
for(int y = 0; y < h; ++y){
for(int x = 0; x < w; ++x){
float sum = 0;
for (int j=-kernel_radius;j<=kernel_radius;j++){
int nx = std::min(std::max(0,x+j),w-1);
sum += h_imgSrc(nx,y) * h_kernel[j+kernel_radius];
}
h_imgTmp(x,y) = sum;
}
}
for(int x = 0; x < w; ++x){
for(int y = 0; y < h; ++y){
float sum = 0;
for (int j=-kernel_radius;j<=kernel_radius;j++){
int ny = std::min(std::max(0,y+j),h-1);
sum += h_imgTmp(x,ny) * h_kernel[j+kernel_radius];
}
h_imgDst(x,y) = sum;
}
}
}
pth.addMeassurement("CPU Convolve Separate",time);
SAIGA_ASSERT(h_ref == h_dest);
}
{
dest = src;
float time;
{
Saiga::CUDA::CudaScopedTimer t(time);
const int LOWPASS_W = 32;
const int LOWPASS_H = 16;
dim3 blocks(Saiga::CUDA::getBlockCount(w, LOWPASS_W), Saiga::CUDA::getBlockCount(h, LOWPASS_H));
dim3 threads(LOWPASS_W+2*kernel_radius, LOWPASS_H);
singlePassConvolve<float,kernel_radius,LOWPASS_W,LOWPASS_H> <<<blocks, threads>>>(imgSrc,imgDst);
}
pth.addMeassurement("GPU Convolve Single Pass",time);
thrust::host_vector<float> test = dest;
for(int i = 0; i < test.size();++i){
if(std::abs(test[i]-h_ref[i]) > 1e-5){
cout << "error " << i << " " << test[i] << "!=" << h_ref[i] << endl;
SAIGA_ASSERT(0);
}
}
}
{
thrust::device_vector<float> d_kernel = h_kernel;
dest = src;
float time;
{
Saiga::CUDA::CudaScopedTimer t(time);
convolveSinglePassSeparate(imgSrc,imgDst,d_kernel,4);
}
pth.addMeassurement("GPU Convolve Single Pass2",time);
thrust::host_vector<float> test = dest;
for(int i = 0; i < test.size();++i){
if(std::abs(test[i]-h_ref[i]) > 1e-5){
cout << "error " << i << " " << test[i] << "!=" << h_ref[i] << endl;
SAIGA_ASSERT(0);
}
}
}
{
dest = src;
tmp = src;
float time1;
{
Saiga::CUDA::CudaScopedTimer t(time1);
convolutionRowsGPU((float*)imgTmp.data,(float*)imgSrc.data,w,h);
}
pth.addMeassurement("GPU Convolve Separate Row",time1);
float time2;
{
Saiga::CUDA::CudaScopedTimer t(time2);
convolutionColumnsGPU((float*)imgDst.data,(float*)imgTmp.data,w,h);
}
pth.addMeassurement("GPU Convolve Separate Col",time2);
pth.addMeassurement("GPU Convolve Separate Total",time1+time2);
thrust::host_vector<float> test = dest;
for(int i = 0; i < test.size();++i){
if(std::abs(test[i]-h_ref[i]) > 1e-5){
cout << "error " << i << " " << test[i] << "!=" << h_ref[i] << " " << h_tmp[i] << endl;
SAIGA_ASSERT(0);
}
}
}
{
float time;
{
Saiga::CUDA::CudaScopedTimer t(time);
cudaMemcpy(thrust::raw_pointer_cast(dest.data()),thrust::raw_pointer_cast(src.data()),N * sizeof(float),cudaMemcpyDeviceToDevice); // buffers hold float, not int
}
pth.addMeassurement("cudaMemcpy", time);
}
CUDA_SYNC_CHECK_ERROR();
}
}
}
|
83753fce4e4037df51c50149ebd757ec368cd711.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 83753fce4e4037df51c50149ebd757ec368cd711.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<32>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<32>;
using ThreadBlockShape = cutlass::gemm::GemmShape<256, 128, 64>;
using WarpShape = cutlass::gemm::GemmShape<64, 64, 64>;
using InstructionShape = cutlass::gemm::GemmShape<8, 8, 16>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationClamp<
int8_t, 8, int32_t, int32_t, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutSrc, int32_t, LayoutSrc, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassTensorOp, cutlass::arch::Sm75,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 16, 16, true>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const int8_t* d_src,
const int8_t* d_filter,
const int32_t* d_bias,
const int8_t* d_z,
int8_t* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
e8abeb1c0e60933aafb416f2da0384178d01a0bd.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
// Utilities and system includes
//#include <helper_functions.h>
//#include <helper_cuda.h>
#include "ColorSpace.h"
/*
* LMS
* ---
* http://biecoll.ub.uni-bielefeld.de/volltexte/2007/52/pdf/ICVS2007-6.pdf
*/
__global__ void rgb2lms(uchar *R, uchar *G, uchar *B,
uchar *L, uchar *M, uchar *S, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar l, m, s;
r = (float)R[index];
g = (float)G[index];
b = (float)B[index];
l = uchar(0.271873f*r + 0.661593f*g + 0.062627f*b);
m = uchar(0.099837f*r + 0.784534f*g + 0.111723f*b);
s = uchar(0.017750f*r + 0.109197f*g + 0.869146f*b);
L[index] = l;
M[index] = m;
S[index] = s;
}
}
/*
* XYZ (Adobe RGB [1998])
* ----------------------
* http://brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
*/
__global__ void rgb2xyz(uchar *R, uchar *G, uchar *B,
uchar *X, uchar *Y, uchar *Z, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar x, y, z;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
x = uchar(0.604410f*r + 0.194460f*g + 0.197220f*b);
y = uchar(0.296215f*r + 0.624898f*g + 0.074980f*b);
z = uchar(0.024732f*r + 0.064667f*g + 0.906695f*b);
X[index] = x;
Y[index] = y;
Z[index] = z;
}
}
/*
* CMY
* ---
*/
__global__ void rgb2cmy(uchar *R, uchar *G, uchar *B,
uchar *C, uchar *M, uchar *Y, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
uchar r, g, b;
uchar c, m, y;
r = R[index];
g = G[index];
b = B[index];
c = 255 - r;
m = 255 - g;
y = 255 - b;
C[index] = c;
M[index] = m;
Y[index] = y;
}
}
/*
* HSL
* ---
*/
__global__ void rgb2hsl(uchar *R, uchar *G, uchar *B,
uchar *H, uchar *S, uchar *L, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b, min, max, c;
float h, s, l;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
// min/max calculate
max = fmaxf( r ,g);
max = fmaxf(max,b);
min = fminf( r ,g);
min = fminf(min,b);
c = max - min;
        // Hue and chroma (guard the grey case: c == 0 would give 0/0 = NaN)
        if( c == 0.0f ){ h = 0.0f ;
        }else if( max == r ){ h = fmodf((g-b)/c , 6.0f);
        }else if( max == g ){ h = (b-r)/c + 2.0f ;
        }else { h = (r-g)/c + 4.0f ;
        }
        if( h < 0.0f ){ h += 6.0f; } // fmodf can return a negative sector
        h = h*255.0f/6.0f; // h is in sector units [0,6); map to [0,255] (was h*255/360)
        // Lightness
        l = (max + min)/2.0f;
        // Saturation (l is in [0,255], so use the byte-scaled HSL formula)
        s = 255.0f - fabsf(2.0f*l - 255.0f);
        if (s == 0.0f){
            s = 0.0f;
        }else{
            s = 255.0f*c/s;
        }
H[index] = (uchar)h;
S[index] = (uchar)s;
L[index] = (uchar)l;
}
}
/*
* HSV
* ---
*/
__global__ void rgb2hsv(uchar *R, uchar *G, uchar *B,
uchar *H, uchar *S, uchar *V, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b, min, max, c;
float h, s, v;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
// min/max calculate
max = fmaxf( r ,g);
max = fmaxf(max,b);
min = fminf( r ,g);
min = fminf(min,b);
c = max - min;
        // Hue and chroma (guard the grey case: c == 0 would give 0/0 = NaN)
        if( c == 0.0f ){ h = 0.0f ;
        }else if( max == r ){ h = fmodf((g-b)/c , 6.0f);
        }else if( max == g ){ h = (b-r)/c + 2.0f ;
        }else { h = (r-g)/c + 4.0f ;
        }
        if( h < 0.0f ){ h += 6.0f; } // fmodf can return a negative sector
        h = h*255.0f/6.0f; // h is in sector units [0,6); map to [0,255] (was h*255/360)
        // Value
        v = max;
        // Saturation (v is in [0,255]; s = c/v scaled to [0,255], was c*2.55f/v)
        if (v == 0.0f){
            s = 0.0f;
        }else{
            s = 255.0f*c/v;
        }
H[index] = (uchar)h;
S[index] = (uchar)s;
V[index] = (uchar)v;
}
}
/*
* YIQ
* ---
*/
__global__ void rgb2yiq(uchar *R, uchar *G, uchar *B,
uchar *Y, uchar *I, uchar *Q, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar y, i, q;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
y = uchar( 0.29783f*r + 0.58471f*g + 0.11355f*b );
i = uchar( 0.49805f*r - 0.22897f*g - 0.26908f*b + 127.5f );
q = uchar( 0.20093f*r - 0.49805f*g + 0.29711f*b + 127.5f );
Y[index] = y;
I[index] = i;
Q[index] = q;
}
}
/*
* YUV (BT.709)
* ------------
*/
__global__ void rgb2yuv(uchar *R, uchar *G, uchar *B,
uchar *Y, uchar *U, uchar *V, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar y, u, v;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
y = (uchar)( 0.2126 *r + 0.7152 *g + 0.0722 *b);
u = (uchar)(-0.09991*r - 0.33609*g + 0.436 *b);
v = (uchar)( 0.6150 *r - 0.55861*g - 0.05639*b);
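        // note: unlike rgb2yiq/rgb2yCbCr no +127.5 offset is added here, so negative
        // U/V values wrap around when cast to uchar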
Y[index] = y;
U[index] = u;
V[index] = v;
}
}
/*
* YCbCr
* -----
*/
__global__ void rgb2yCbCr(uchar *R, uchar *G , uchar *B,
uchar *Y, uchar *Cb, uchar *Cr, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar y, cb, cr;
r = (float)R[index];
g = (float)G[index];
b = (float)B[index];
y = uchar( 0.211770f*r + 0.712406f*g + 0.071918f*b);
cb = uchar(-0.114130f*r - 0.383920f*g + 0.498050f*b + 127.50f);
cr = uchar( 0.498047f*r - 0.452380f*g - 0.045666f*b + 127.50f);
Y [index] = y;
Cb[index] = cb;
Cr[index] = cr;
}
}
/*
* Transform color models
* ----------------------
 * Color models:
* - 0: CMY - 4: LMS
* - 1: HSL - 5: YIQ
* - 2: HSV - 6: YUV
* - 3: XYZ - 7: YCbCr
*/
extern "C" void transformColorModel(uchar *h_R , uchar *h_G , uchar *h_B ,
uchar *h_C1, uchar *h_C2, uchar *h_C3,
int n, uint model){
uchar *d_R , *d_G , *d_B ,
*d_C1, *d_C2, *d_C3;
int *d_n;
hipMalloc(&d_R , n * sizeof(uchar));
hipMalloc(&d_G , n * sizeof(uchar));
hipMalloc(&d_B , n * sizeof(uchar));
hipMalloc(&d_C1, n * sizeof(uchar));
hipMalloc(&d_C2, n * sizeof(uchar));
hipMalloc(&d_C3, n * sizeof(uchar));
hipMalloc(&d_n , sizeof( int ));
hipMemcpy(d_R, h_R, n * sizeof(uchar), hipMemcpyHostToDevice);
hipMemcpy(d_G, h_G, n * sizeof(uchar), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, n * sizeof(uchar), hipMemcpyHostToDevice);
hipMemcpy(d_n, &n, sizeof( int ), hipMemcpyHostToDevice);
int Nthreads = 128;
int Nblocks = (int)ceil( ((float)n+((float)Nthreads-1.0)) / ((float)Nthreads) );
switch(model){
// CMY
case 0:
hipLaunchKernelGGL(( rgb2cmy), dim3(Nblocks),dim3(Nthreads) , 0, 0, d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// HSL
case 1:
hipLaunchKernelGGL(( rgb2hsl), dim3(Nblocks),dim3(Nthreads) , 0, 0, d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// HSV
case 2:
hipLaunchKernelGGL(( rgb2hsv), dim3(Nblocks),dim3(Nthreads) , 0, 0, d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// XYZ
case 3:
hipLaunchKernelGGL(( rgb2xyz), dim3(Nblocks),dim3(Nthreads) , 0, 0, d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// LMS
case 4:
hipLaunchKernelGGL(( rgb2lms), dim3(Nblocks),dim3(Nthreads) , 0, 0, d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// YIQ
case 5:
hipLaunchKernelGGL(( rgb2yiq), dim3(Nblocks),dim3(Nthreads) , 0, 0, d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// YUV
case 6:
hipLaunchKernelGGL(( rgb2yuv), dim3(Nblocks),dim3(Nthreads) , 0, 0, d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// YCbCr
case 7:
hipLaunchKernelGGL(( rgb2yCbCr), dim3(Nblocks),dim3(Nthreads) , 0, 0, d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
}
hipMemcpy(h_C1, d_C1, n * sizeof(uchar), hipMemcpyDeviceToHost);
hipMemcpy(h_C2, d_C2, n * sizeof(uchar), hipMemcpyDeviceToHost);
hipMemcpy(h_C3, d_C3, n * sizeof(uchar), hipMemcpyDeviceToHost);
hipFree(d_n);
hipFree(d_R);
hipFree(d_G);
hipFree(d_B);
hipFree(d_C1);
hipFree(d_C2);
hipFree(d_C3);
}
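/*
 * Minimal host-side usage sketch (hypothetical buffers, error checking omitted):
 * convert an n-pixel planar RGB image to YCbCr (model 7).
 *
 *   std::vector<uchar> R(n), G(n), B(n), Y(n), Cb(n), Cr(n);
 *   // ... fill R, G, B ...
 *   transformColorModel(R.data(), G.data(), B.data(),
 *                       Y.data(), Cb.data(), Cr.data(), n, 7);
 */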
| e8abeb1c0e60933aafb416f2da0384178d01a0bd.cu | // CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
// Utilities and system includes
//#include <helper_functions.h>
//#include <helper_cuda.h>
#include "ColorSpace.h"
/*
* LMS
* ---
* http://biecoll.ub.uni-bielefeld.de/volltexte/2007/52/pdf/ICVS2007-6.pdf
*/
__global__ void rgb2lms(uchar *R, uchar *G, uchar *B,
uchar *L, uchar *M, uchar *S, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar l, m, s;
r = (float)R[index];
g = (float)G[index];
b = (float)B[index];
l = uchar(0.271873f*r + 0.661593f*g + 0.062627f*b);
m = uchar(0.099837f*r + 0.784534f*g + 0.111723f*b);
s = uchar(0.017750f*r + 0.109197f*g + 0.869146f*b);
L[index] = l;
M[index] = m;
S[index] = s;
}
}
/*
* XYZ (Adobe RGB [1998])
* ----------------------
* http://brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
*/
__global__ void rgb2xyz(uchar *R, uchar *G, uchar *B,
uchar *X, uchar *Y, uchar *Z, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar x, y, z;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
x = uchar(0.604410f*r + 0.194460f*g + 0.197220f*b);
y = uchar(0.296215f*r + 0.624898f*g + 0.074980f*b);
z = uchar(0.024732f*r + 0.064667f*g + 0.906695f*b);
X[index] = x;
Y[index] = y;
Z[index] = z;
}
}
/*
* CMY
* ---
*/
__global__ void rgb2cmy(uchar *R, uchar *G, uchar *B,
uchar *C, uchar *M, uchar *Y, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
uchar r, g, b;
uchar c, m, y;
r = R[index];
g = G[index];
b = B[index];
c = 255 - r;
m = 255 - g;
y = 255 - b;
C[index] = c;
M[index] = m;
Y[index] = y;
}
}
/*
* HSL
* ---
*/
__global__ void rgb2hsl(uchar *R, uchar *G, uchar *B,
uchar *H, uchar *S, uchar *L, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b, min, max, c;
float h, s, l;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
// min/max calculate
max = fmaxf( r ,g);
max = fmaxf(max,b);
min = fminf( r ,g);
min = fminf(min,b);
c = max - min;
        // Hue and chroma (guard the grey case: c == 0 would give 0/0 = NaN)
        if( c == 0.0f ){ h = 0.0f ;
        }else if( max == r ){ h = fmodf((g-b)/c , 6.0f);
        }else if( max == g ){ h = (b-r)/c + 2.0f ;
        }else { h = (r-g)/c + 4.0f ;
        }
        if( h < 0.0f ){ h += 6.0f; } // fmodf can return a negative sector
        h = h*255.0f/6.0f; // h is in sector units [0,6); map to [0,255] (was h*255/360)
        // Lightness
        l = (max + min)/2.0f;
        // Saturation (l is in [0,255], so use the byte-scaled HSL formula)
        s = 255.0f - fabsf(2.0f*l - 255.0f);
        if (s == 0.0f){
            s = 0.0f;
        }else{
            s = 255.0f*c/s;
        }
H[index] = (uchar)h;
S[index] = (uchar)s;
L[index] = (uchar)l;
}
}
/*
* HSV
* ---
*/
__global__ void rgb2hsv(uchar *R, uchar *G, uchar *B,
uchar *H, uchar *S, uchar *V, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b, min, max, c;
float h, s, v;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
// min/max calculate
max = fmaxf( r ,g);
max = fmaxf(max,b);
min = fminf( r ,g);
min = fminf(min,b);
c = max - min;
        // Hue and chroma (guard the grey case: c == 0 would give 0/0 = NaN)
        if( c == 0.0f ){ h = 0.0f ;
        }else if( max == r ){ h = fmodf((g-b)/c , 6.0f);
        }else if( max == g ){ h = (b-r)/c + 2.0f ;
        }else { h = (r-g)/c + 4.0f ;
        }
        if( h < 0.0f ){ h += 6.0f; } // fmodf can return a negative sector
        h = h*255.0f/6.0f; // h is in sector units [0,6); map to [0,255] (was h*255/360)
        // Value
        v = max;
        // Saturation (v is in [0,255]; s = c/v scaled to [0,255], was c*2.55f/v)
        if (v == 0.0f){
            s = 0.0f;
        }else{
            s = 255.0f*c/v;
        }
H[index] = (uchar)h;
S[index] = (uchar)s;
V[index] = (uchar)v;
}
}
/*
* YIQ
* ---
*/
__global__ void rgb2yiq(uchar *R, uchar *G, uchar *B,
uchar *Y, uchar *I, uchar *Q, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar y, i, q;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
y = uchar( 0.29783f*r + 0.58471f*g + 0.11355f*b );
i = uchar( 0.49805f*r - 0.22897f*g - 0.26908f*b + 127.5f );
q = uchar( 0.20093f*r - 0.49805f*g + 0.29711f*b + 127.5f );
Y[index] = y;
I[index] = i;
Q[index] = q;
}
}
/*
* YUV (BT.709)
* ------------
*/
__global__ void rgb2yuv(uchar *R, uchar *G, uchar *B,
uchar *Y, uchar *U, uchar *V, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar y, u, v;
r = float(R[index]);
g = float(G[index]);
b = float(B[index]);
y = (uchar)( 0.2126 *r + 0.7152 *g + 0.0722 *b);
u = (uchar)(-0.09991*r - 0.33609*g + 0.436 *b);
v = (uchar)( 0.6150 *r - 0.55861*g - 0.05639*b);
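        // note: unlike rgb2yiq/rgb2yCbCr no +127.5 offset is added here, so negative
        // U/V values wrap around when cast to uchar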
Y[index] = y;
U[index] = u;
V[index] = v;
}
}
/*
* YCbCr
* -----
*/
__global__ void rgb2yCbCr(uchar *R, uchar *G , uchar *B,
uchar *Y, uchar *Cb, uchar *Cr, int *N) {
int index;
index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < N[0]){
float r, g, b;
uchar y, cb, cr;
r = (float)R[index];
g = (float)G[index];
b = (float)B[index];
y = uchar( 0.211770f*r + 0.712406f*g + 0.071918f*b);
cb = uchar(-0.114130f*r - 0.383920f*g + 0.498050f*b + 127.50f);
cr = uchar( 0.498047f*r - 0.452380f*g - 0.045666f*b + 127.50f);
Y [index] = y;
Cb[index] = cb;
Cr[index] = cr;
}
}
/*
* Transform color models
* ----------------------
 * Color models:
* - 0: CMY - 4: LMS
* - 1: HSL - 5: YIQ
* - 2: HSV - 6: YUV
* - 3: XYZ - 7: YCbCr
*/
extern "C" void transformColorModel(uchar *h_R , uchar *h_G , uchar *h_B ,
uchar *h_C1, uchar *h_C2, uchar *h_C3,
int n, uint model){
uchar *d_R , *d_G , *d_B ,
*d_C1, *d_C2, *d_C3;
int *d_n;
cudaMalloc(&d_R , n * sizeof(uchar));
cudaMalloc(&d_G , n * sizeof(uchar));
cudaMalloc(&d_B , n * sizeof(uchar));
cudaMalloc(&d_C1, n * sizeof(uchar));
cudaMalloc(&d_C2, n * sizeof(uchar));
cudaMalloc(&d_C3, n * sizeof(uchar));
cudaMalloc(&d_n , sizeof( int ));
cudaMemcpy(d_R, h_R, n * sizeof(uchar), cudaMemcpyHostToDevice);
cudaMemcpy(d_G, h_G, n * sizeof(uchar), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, n * sizeof(uchar), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, &n, sizeof( int ), cudaMemcpyHostToDevice);
int Nthreads = 128;
int Nblocks = (int)ceil( ((float)n+((float)Nthreads-1.0)) / ((float)Nthreads) );
switch(model){
// CMY
case 0:
rgb2cmy<<< Nblocks,Nthreads >>>(d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// HSL
case 1:
rgb2hsl<<< Nblocks,Nthreads >>>(d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// HSV
case 2:
rgb2hsv<<< Nblocks,Nthreads >>>(d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// XYZ
case 3:
rgb2xyz<<< Nblocks,Nthreads >>>(d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// LMS
case 4:
rgb2lms<<< Nblocks,Nthreads >>>(d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// YIQ
case 5:
rgb2yiq<<< Nblocks,Nthreads >>>(d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// YUV
case 6:
rgb2yuv<<< Nblocks,Nthreads >>>(d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
// YCbCr
case 7:
rgb2yCbCr<<< Nblocks,Nthreads >>>(d_R ,d_G ,d_B ,
d_C1,d_C2,d_C3, d_n);
break;
}
cudaMemcpy(h_C1, d_C1, n * sizeof(uchar), cudaMemcpyDeviceToHost);
cudaMemcpy(h_C2, d_C2, n * sizeof(uchar), cudaMemcpyDeviceToHost);
cudaMemcpy(h_C3, d_C3, n * sizeof(uchar), cudaMemcpyDeviceToHost);
cudaFree(d_n);
cudaFree(d_R);
cudaFree(d_G);
cudaFree(d_B);
cudaFree(d_C1);
cudaFree(d_C2);
cudaFree(d_C3);
}
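/*
 * Minimal host-side usage sketch (hypothetical buffers, error checking omitted):
 * convert an n-pixel planar RGB image to YCbCr (model 7).
 *
 *   std::vector<uchar> R(n), G(n), B(n), Y(n), Cb(n), Cr(n);
 *   // ... fill R, G, B ...
 *   transformColorModel(R.data(), G.data(), B.data(),
 *                       Y.data(), Cb.data(), Cr.data(), n, 7);
 */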
|
3248a1268536534b894a54dc8b994cbd39855c18.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/csr2coo.cc
* \brief CSR2COO
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
template <DLDeviceType XPU, typename IdType>
COOMatrix CSRToCOO(CSRMatrix csr) {
LOG(FATAL) << "Unreachable codes";
return {};
}
template <>
COOMatrix CSRToCOO<kDLGPU, int32_t>(CSRMatrix csr) {
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
NDArray indptr = csr.indptr, indices = csr.indices, data = csr.data;
const int32_t* indptr_ptr = static_cast<int32_t*>(indptr->data);
NDArray row = aten::NewIdArray(indices->shape[0], indptr->ctx, indptr->dtype.bits);
int32_t* row_ptr = static_cast<int32_t*>(row->data);
CUSPARSE_CALL(hipsparseXcsr2coo(
thr_entry->cusparse_handle,
indptr_ptr,
indices->shape[0],
csr.num_rows,
row_ptr,
HIPSPARSE_INDEX_BASE_ZERO));
return COOMatrix(csr.num_rows, csr.num_cols,
row, indices, data,
true, csr.sorted);
}
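// Example: indptr = [0, 2, 2, 5] (3 rows, 5 nonzeros) is expanded by hipsparseXcsr2coo
// into per-nonzero row ids: row = [0, 0, 2, 2, 2].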
/*!
* \brief Repeat elements
* \param val Value to repeat
 * \param pos Exclusive prefix sum of the repeat counts (indptr-like buffer)
 * \param out Output buffer.
 * \param n_row Number of values
 * \param length Length of the output buffer
*
* For example:
* val = [3, 0, 1]
* repeats = [1, 0, 2]
* pos = [0, 1, 1] # write to output buffer position 0, 1, 1
* then,
* out = [3, 1, 1]
*/
template <typename DType, typename IdType>
__global__ void _RepeatKernel(
const DType* val, const IdType* pos,
DType* out, int64_t n_row, int64_t length) {
IdType tx = static_cast<IdType>(blockIdx.x) * blockDim.x + threadIdx.x;
const int stride_x = gridDim.x * blockDim.x;
while (tx < length) {
IdType i = dgl::cuda::_UpperBound(pos, n_row, tx) - 1;
out[tx] = val[i];
tx += stride_x;
}
}
template <>
COOMatrix CSRToCOO<kDLGPU, int64_t>(CSRMatrix csr) {
const auto& ctx = csr.indptr->ctx;
const int64_t nnz = csr.indices->shape[0];
const auto nbits = csr.indptr->dtype.bits;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
IdArray rowids = Range(0, csr.num_rows, nbits, ctx);
IdArray ret_row = NewIdArray(nnz, ctx, nbits);
const int nt = 256;
const int nb = (nnz + nt - 1) / nt;
CUDA_KERNEL_CALL(_RepeatKernel,
nb, nt, 0, thr_entry->stream,
rowids.Ptr<int64_t>(),
csr.indptr.Ptr<int64_t>(), ret_row.Ptr<int64_t>(),
csr.num_rows, nnz);
return COOMatrix(csr.num_rows, csr.num_cols,
ret_row, csr.indices, csr.data,
true, csr.sorted);
}
template COOMatrix CSRToCOO<kDLGPU, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOO<kDLGPU, int64_t>(CSRMatrix csr);
template <DLDeviceType XPU, typename IdType>
COOMatrix CSRToCOODataAsOrder(CSRMatrix csr) {
LOG(FATAL) << "Unreachable codes";
return {};
}
template <>
COOMatrix CSRToCOODataAsOrder<kDLGPU, int32_t>(CSRMatrix csr) {
COOMatrix coo = CSRToCOO<kDLGPU, int32_t>(csr);
if (aten::IsNullArray(coo.data))
return coo;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(coo.row->ctx);
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(hipsparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(hipsparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
NDArray row = coo.row, col = coo.col, data = coo.data;
int32_t* row_ptr = static_cast<int32_t*>(row->data);
int32_t* col_ptr = static_cast<int32_t*>(col->data);
int32_t* data_ptr = static_cast<int32_t*>(data->data);
size_t workspace_size = 0;
CUSPARSE_CALL(hipsparseXcoosort_bufferSizeExt(
thr_entry->cusparse_handle,
coo.num_rows, coo.num_cols,
row->shape[0],
data_ptr,
row_ptr,
&workspace_size));
void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
CUSPARSE_CALL(hipsparseXcoosortByRow(
thr_entry->cusparse_handle,
coo.num_rows, coo.num_cols,
row->shape[0],
data_ptr,
row_ptr,
col_ptr,
workspace));
device->FreeWorkspace(row->ctx, workspace);
// The row and column field have already been reordered according
// to data, thus the data field will be deprecated.
coo.data = aten::NullArray();
coo.row_sorted = false;
coo.col_sorted = false;
return coo;
}
template <>
COOMatrix CSRToCOODataAsOrder<kDLGPU, int64_t>(CSRMatrix csr) {
COOMatrix coo = CSRToCOO<kDLGPU, int64_t>(csr);
if (aten::IsNullArray(coo.data))
return coo;
const auto& sorted = Sort(coo.data);
coo.row = IndexSelect(coo.row, sorted.second);
coo.col = IndexSelect(coo.col, sorted.second);
// The row and column field have already been reordered according
// to data, thus the data field will be deprecated.
coo.data = aten::NullArray();
coo.row_sorted = false;
coo.col_sorted = false;
return coo;
}
template COOMatrix CSRToCOODataAsOrder<kDLGPU, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOODataAsOrder<kDLGPU, int64_t>(CSRMatrix csr);
} // namespace impl
} // namespace aten
} // namespace dgl
| 3248a1268536534b894a54dc8b994cbd39855c18.cu | /*!
* Copyright (c) 2020 by Contributors
* \file array/cuda/csr2coo.cc
* \brief CSR2COO
*/
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"
#include "./utils.h"
namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {
template <DLDeviceType XPU, typename IdType>
COOMatrix CSRToCOO(CSRMatrix csr) {
LOG(FATAL) << "Unreachable codes";
return {};
}
template <>
COOMatrix CSRToCOO<kDLGPU, int32_t>(CSRMatrix csr) {
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
NDArray indptr = csr.indptr, indices = csr.indices, data = csr.data;
const int32_t* indptr_ptr = static_cast<int32_t*>(indptr->data);
NDArray row = aten::NewIdArray(indices->shape[0], indptr->ctx, indptr->dtype.bits);
int32_t* row_ptr = static_cast<int32_t*>(row->data);
CUSPARSE_CALL(cusparseXcsr2coo(
thr_entry->cusparse_handle,
indptr_ptr,
indices->shape[0],
csr.num_rows,
row_ptr,
CUSPARSE_INDEX_BASE_ZERO));
return COOMatrix(csr.num_rows, csr.num_cols,
row, indices, data,
true, csr.sorted);
}
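// Example: indptr = [0, 2, 2, 5] (3 rows, 5 nonzeros) is expanded by cusparseXcsr2coo
// into per-nonzero row ids: row = [0, 0, 2, 2, 2].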
/*!
* \brief Repeat elements
* \param val Value to repeat
 * \param pos Exclusive prefix sum of the repeat counts (indptr-like buffer)
 * \param out Output buffer.
 * \param n_row Number of values
 * \param length Length of the output buffer
*
* For example:
* val = [3, 0, 1]
* repeats = [1, 0, 2]
* pos = [0, 1, 1] # write to output buffer position 0, 1, 1
* then,
* out = [3, 1, 1]
*/
template <typename DType, typename IdType>
__global__ void _RepeatKernel(
const DType* val, const IdType* pos,
DType* out, int64_t n_row, int64_t length) {
IdType tx = static_cast<IdType>(blockIdx.x) * blockDim.x + threadIdx.x;
const int stride_x = gridDim.x * blockDim.x;
while (tx < length) {
IdType i = dgl::cuda::_UpperBound(pos, n_row, tx) - 1;
out[tx] = val[i];
tx += stride_x;
}
}
template <>
COOMatrix CSRToCOO<kDLGPU, int64_t>(CSRMatrix csr) {
const auto& ctx = csr.indptr->ctx;
const int64_t nnz = csr.indices->shape[0];
const auto nbits = csr.indptr->dtype.bits;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
IdArray rowids = Range(0, csr.num_rows, nbits, ctx);
IdArray ret_row = NewIdArray(nnz, ctx, nbits);
const int nt = 256;
const int nb = (nnz + nt - 1) / nt;
CUDA_KERNEL_CALL(_RepeatKernel,
nb, nt, 0, thr_entry->stream,
rowids.Ptr<int64_t>(),
csr.indptr.Ptr<int64_t>(), ret_row.Ptr<int64_t>(),
csr.num_rows, nnz);
return COOMatrix(csr.num_rows, csr.num_cols,
ret_row, csr.indices, csr.data,
true, csr.sorted);
}
template COOMatrix CSRToCOO<kDLGPU, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOO<kDLGPU, int64_t>(CSRMatrix csr);
template <DLDeviceType XPU, typename IdType>
COOMatrix CSRToCOODataAsOrder(CSRMatrix csr) {
LOG(FATAL) << "Unreachable codes";
return {};
}
template <>
COOMatrix CSRToCOODataAsOrder<kDLGPU, int32_t>(CSRMatrix csr) {
COOMatrix coo = CSRToCOO<kDLGPU, int32_t>(csr);
if (aten::IsNullArray(coo.data))
return coo;
auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
auto device = runtime::DeviceAPI::Get(coo.row->ctx);
// allocate cusparse handle if needed
if (!thr_entry->cusparse_handle) {
CUSPARSE_CALL(cusparseCreate(&(thr_entry->cusparse_handle)));
}
CUSPARSE_CALL(cusparseSetStream(thr_entry->cusparse_handle, thr_entry->stream));
NDArray row = coo.row, col = coo.col, data = coo.data;
int32_t* row_ptr = static_cast<int32_t*>(row->data);
int32_t* col_ptr = static_cast<int32_t*>(col->data);
int32_t* data_ptr = static_cast<int32_t*>(data->data);
size_t workspace_size = 0;
CUSPARSE_CALL(cusparseXcoosort_bufferSizeExt(
thr_entry->cusparse_handle,
coo.num_rows, coo.num_cols,
row->shape[0],
data_ptr,
row_ptr,
&workspace_size));
void* workspace = device->AllocWorkspace(row->ctx, workspace_size);
CUSPARSE_CALL(cusparseXcoosortByRow(
thr_entry->cusparse_handle,
coo.num_rows, coo.num_cols,
row->shape[0],
data_ptr,
row_ptr,
col_ptr,
workspace));
device->FreeWorkspace(row->ctx, workspace);
// The row and column field have already been reordered according
// to data, thus the data field will be deprecated.
coo.data = aten::NullArray();
coo.row_sorted = false;
coo.col_sorted = false;
return coo;
}
template <>
COOMatrix CSRToCOODataAsOrder<kDLGPU, int64_t>(CSRMatrix csr) {
COOMatrix coo = CSRToCOO<kDLGPU, int64_t>(csr);
if (aten::IsNullArray(coo.data))
return coo;
const auto& sorted = Sort(coo.data);
coo.row = IndexSelect(coo.row, sorted.second);
coo.col = IndexSelect(coo.col, sorted.second);
// The row and column field have already been reordered according
// to data, thus the data field will be deprecated.
coo.data = aten::NullArray();
coo.row_sorted = false;
coo.col_sorted = false;
return coo;
}
template COOMatrix CSRToCOODataAsOrder<kDLGPU, int32_t>(CSRMatrix csr);
template COOMatrix CSRToCOODataAsOrder<kDLGPU, int64_t>(CSRMatrix csr);
} // namespace impl
} // namespace aten
} // namespace dgl
|
multiplier.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <complex>
#include "multiplier.h"
#include "multiplier_three_cavity.h"
#include "multiplier_multimode.h"
#include "cu_mult.h"
//#include "print.h"
#define PAR ParamsM
#ifndef dm_Pi
__device__ const double dm_Pi = 3.141592653589793;
#endif
int Nz = 0;
double Lsolver = 0;
double MultiplierGroupSpeedCoefficient = 0;
__device__ double grSpeedCoeff;
int *d_Nz;
double *d_Lsolver;
//hipPitchedPtr d2_rJ3, d2_iJ3, d2_int_rJ3, d2_int_iJ3, d2_W;
//hipPitchedPtr d1_rJ3, d1_iJ3, d1_int_rJ3, d1_int_iJ3, d1_W;
__device__ void biReduce(double *A, double *B, int p0, int datasize, int logsize)
{
int stride = datasize;
for (int q = 1; q <= logsize; q++)
{
stride = stride >> 1;
if (p0 < stride)
{
A[p0] += A[p0 + stride];
}
else
{
if (p0 < 2 * stride)
{
B[p0 - stride] += B[p0];
}
}
__syncthreads();
}
}
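// Example (datasize = 4, logsize = 2): with A = {1,2,3,4} and B = {5,6,7,8}, the lower
// half of the threads folds A while the upper half folds B in the same passes, leaving
// A[0] == 10 and B[0] == 26; two block-wide sums for the price of one reduction tree.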
__device__ double dh( double delta)
{
return delta*delta*(grSpeedCoeff);
}
__device__ void funcA (double z, double *rA, double *iA, double2 *Amps, int Na)
{
*rA = 0;
*iA = 0;
double rF, iF;
for(int i = 0; i < Na; i++)
{
sincos(z*double(i - Na/2), &iF, &rF);
*rA += Amps[i].x*rF - Amps[i].y*iF;
*iA += Amps[i].x*iF + Amps[i].y*rF;
}
}
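// funcA evaluates the truncated Fourier series A(z) = sum_i Amps[i]*exp(j*(i - Na/2)*z),
// returning Re(A) in *rA and Im(A) in *iA.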
__global__ void
__launch_bounds__ (512, 2)
MotionEquationMultiplier(PAR *par, double Lstop, int Nharm, double A, double2 B)//Fixed Structure
{
unsigned int p0 = threadIdx.x; unsigned int Np = blockDim.x;
unsigned int q0 = threadIdx.y; unsigned int Nq = blockDim.y;
unsigned int s0 = threadIdx.z; unsigned int Ns = blockDim.z;
unsigned int q_init = Nq*blockIdx.x + q0; unsigned int Nq_max = Nq*gridDim.x;
unsigned int s_init = Ns*blockIdx.y + s0; unsigned int Ns_max = Ns*gridDim.y;
unsigned int v_init = blockIdx.z; unsigned int Nv_max = gridDim.z;
int warpsize = Np*Nq*Ns;
int log2warpsize = round(log2((double)warpsize));
int X = blockIdx.x + gridDim.x*blockIdx.y + gridDim.y*gridDim.x*blockIdx.z;
double la, lb, ld, h, k1, voltage, g1, g3;
__shared__ double avEN, int_rJ3, int_iJ3;
int N;
double dz;
N = par->Nz;
la = par->la; lb = par->lb; ld = par->ld;
h = par->h; k1 = par->k1;
g1 = par->g1; g3 = par->g3;
voltage = par->voltage;
double *rJ3 = par->rJ3;
double *iJ3 = par->iJ3;
dz = par->L/(double)N;
double z;
int ifinal = floor(Lstop/dz);
double Q, Qk1, Qk2, Qk3, Qk4;
double W, Wk1, Wk2, Wk3, Wk4;
double fA, fB, rA, r;
double Wmax, Wmin;
double ifnotdestroyed = 1;
double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle, angle_spread_factor;
double wall = par->wall;
R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max);
initial_angle = 0;//(0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center);
R_cyclotron = (0.568*initial_angle + 0*0.035156*((double)v_init)/double(Nv_max));
kappa_cyclotron = 1.758;
phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;
/*R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max);
initial_angle = 0.087*((double)v_init - 0.5*Nv_max)/double(Nv_max);
	R_cyclotron = 0.744*initial_angle;// f = 86.6: 0.568; f = 95.5: 0.744
	kappa_cyclotron = 1.344; // f = 86.6: 1.758; f = 95.5: 1.344
phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;*/
double en0 = 1. + voltage/511.;
angle_spread_factor = 1./sqrt(1. + initial_angle*initial_angle);
	// R_center is measured from the wall; initial_angle sets the cyclotron radius below
Q = 2.*dm_Pi/double(Np)*double(p0);
W = 0;
__shared__ double sh_sinQ[NS*NQ*NP];
__shared__ double sh_cosQ[NS*NQ*NP];
/* __shared__ double shQ[NS][NQ][NP];
__shared__ double shW[NS][NQ][NP];
__shared__ double d2_rJ3[NQ][NP];
__shared__ double d2_iJ3[NQ][NP];
__shared__ double d1_rJ3[NP];
__shared__ double d1_iJ3[NP];*/
double PH, EN, cosPH, sinPH, cosPS, sinPS, rB, iB;
double H = h;//+dh(delta);
if(p0+q_init+s_init + v_init == 0)
{
rJ3[0] = 0;
iJ3[0] = 0;
}
if(p0+q0+s0== 0)
{
par->int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par->int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
int_rJ3 = 0;
int_iJ3 = 0;
avEN = 0;
}
// if(s0+p0+q0 == 0) printf("la = %g, ld = %g, lb = %g \n", la, ld, lb);
int i = 0;
for(i = 1; i < N; i++)
{
/////////////////
z = (double)i*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= (r > -wall)? 1. : 0.; ///!!!!!!
// ifnotdestroyed = 1;
PH = Q;
EN = W + en0;
// if((s0+p0+q0 == 0)) printf("%g\t%g\n", z, fB);
fA = ((z<la)?sin(dm_Pi/la*z)* exp(-g1*r):0);
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0);
rA = A*fA;
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
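		// RK4 stage 1: dQ/dz = h - k1*gamma*angle_spread_factor/sqrt(gamma^2 - 1) (wave-beam slippage),
		// dW/dz = -(rA*cos(Q) + Re[B*exp(3iQ)]) (work of the 1st- and 3rd-harmonic fields)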
Qk1 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.));
Wk1 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+0.5)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
// ifnotdestroyed *= (r > -wall)? 1. : 0.;
// ifnotdestroyed = 1;
PH = Q + 0.5*Qk1;
EN = W + 0.5*Wk1 + en0;
fA = ((z<la)?sin(dm_Pi/la*z) * exp(-g1*r):0);
fB = ((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0;
rA = A*fA;
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk2 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.));
Wk2 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
PH = Q + 0.5*Qk2;
EN = W + 0.5*Wk2 + en0;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk3 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.));
Wk3 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+1.)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
// ifnotdestroyed *= (r > -wall)? 1. : 0.;
// ifnotdestroyed = 1;
PH = Q + Qk3;
EN = W + Wk3 + en0;
fA = ((z<la)? sin(dm_Pi/la*z)* exp(-g1*r):0);
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0);
rA = A*fA;
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk4= dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.));
Wk4= -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
///////////////
Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4);
W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4);
/* shQ[s0][q0][p0] = Q;
shW[s0][q0][p0] = W;*/
__syncthreads();
sincos(double(Nharm)*Q, &sinPH, &cosPH);
if(Nharm == 1)
			fB = ((z<la)?sin(dm_Pi/la*z):0)*exp(-g1*r); // structure factor of the sampled harmonic (depends on r, i.e. on q0, s0)
else
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r);
fB *= ifnotdestroyed;
int xi = p0 + Np*q0 + Np*Nq*s0;
sh_sinQ[xi] = sinPH*fB;
sh_cosQ[xi] = cosPH*fB;
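		// block-wide butterfly reduction (cf. biReduce): the lower half of the threads
		// accumulates sh_sinQ while the upper half accumulates sh_cosQ in the same passes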
unsigned int stride = warpsize;
__syncthreads();
for(int q = 1; q <= log2warpsize; q++)
{
stride = stride >> 1;//roundf(powf(2., q));
if(xi < stride)
{
sh_sinQ[xi] += sh_sinQ[xi + stride];
}
else
{
if(xi < 2*stride)
{
sh_cosQ[xi - stride] += sh_cosQ[xi];
}
}
__syncthreads();
}
// if((i == 1300)) printf("%g\n", Q);
if(xi == 0)
{
rJ3[X*N+i] = sh_cosQ[0];
iJ3[X*N+i] = -sh_sinQ[0];
// if((i == 1300)) printf("\n%g\n", sh_cosQ[0]);
int_rJ3 += sh_cosQ[0];
int_iJ3 += -sh_sinQ[0];
}
	/* //////// old tree reduction of the Nharm-th harmonic current (kept for reference)
if(s0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0, tmpPhCycl = 0;
for(int ii = 0; ii < Ns; ii++)
{
int ii_init = Ns*blockIdx.y + ii;
tmpPhCycl = 2.*dm_Pi*(double)ii_init/(double)Ns_max;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + tmpPhCycl));
if(Nharm == 1)
fB = ((z<la)?sin(dm_Pi/la*z):0)*exp(-g1*r); //fB !! fB q0, s0
else
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r);
fB *= ifnotdestroyed;
PH = shQ[ii][q0][p0];
sincos((double)Nharm*PH, &sinPS, &cosPS);
tmp_rJ3 += cosPS*fB;
tmp_iJ3 -= sinPS*fB;
}
d2_rJ3[q0][p0] = tmp_rJ3;
d2_iJ3[q0][p0] = tmp_iJ3;
}
__threadfence();
__syncthreads();
if(s0 + q0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Nq; ii++)
{
tmp_rJ3 += d2_rJ3[ii][p0];
tmp_iJ3 += d2_iJ3[ii][p0];
}
d1_rJ3[p0] = tmp_rJ3;
d1_iJ3[p0] = tmp_iJ3;
}
__threadfence();
__syncthreads();
if(p0 + q0 +s0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Np; ii++)
{
tmp_rJ3 += d1_rJ3[ii];
tmp_iJ3 += d1_iJ3[ii];
}
rJ3[i] = tmp_rJ3;
iJ3[i] = tmp_iJ3;
int_rJ3 += tmp_rJ3;
int_iJ3 += tmp_iJ3;
// if(i == ifinal) printf("<< %g %g\n", A*int_rJ3, A*tmp_rJ3);
}
__threadfence();
__syncthreads();
//////////////////// Nharm
*/
if(i == ifinal)
{
sh_sinQ[xi] = W;
__syncthreads();
stride = warpsize;
for(int q = 1; q <= log2warpsize; q++)
{
stride = stride >> 1;//warpsize/roundf(powf(2., q));
if(xi < stride)
{
sh_sinQ[xi] += sh_sinQ[xi + stride];
}
__syncthreads();
}
if(xi == 0)
{
avEN = sh_sinQ[0];
}
__syncthreads();
sh_sinQ[xi] = W;
sh_cosQ[xi] = W;
stride = warpsize;
for(int q = 1; q <= log2warpsize; q++)
{
stride = stride >> 1;// stride = warpsize/roundf(powf(2., q));
if(xi < stride)
{
sh_sinQ[xi] = (sh_sinQ[xi] > sh_sinQ[xi + stride]) ? sh_sinQ[xi] : sh_sinQ[xi + stride];
}
else
{
if(xi < 2*stride)
{
sh_cosQ[xi - stride] = (sh_cosQ[xi - stride] < sh_cosQ[xi]) ? sh_cosQ[xi - stride] : sh_cosQ[xi];
}
}
__syncthreads();
}
if(xi == 0)
{
Wmax = sh_sinQ[0];
Wmin = sh_cosQ[0];
}
/*
if(s0 == 0)
{
double tmp_W = 0;
for(int ii = 0; ii < Ns; ii++)
{
EN = shW[ii][q0][p0];
tmp_W += EN;
}
d2_rJ3[q0][p0] = tmp_W;
// if((p0 == 0)) printf(" %g >>, \n", d2_rJ3[q0][p0]);
}
__threadfence();
__syncthreads();
if(s0 + q0 == 0)
{
double tmp_rJ3 = 0;
for(int ii = 0; ii < Nq; ii++)
tmp_rJ3 += d2_rJ3[ii][p0];
d1_rJ3[p0] = tmp_rJ3;
}
__threadfence();
__syncthreads();
if(p0 + q0 +s0 == 0)
{
double tmp_rJ3 = 0;
for(int ii = 0; ii < Np; ii++)
tmp_rJ3 += d1_rJ3[ii];
(avEN) += tmp_rJ3;
}
*/
__syncthreads();
}
__threadfence();
__syncthreads();
if(i > ifinal) break;
}
// printf("END\t");
// if(p0 + s0 == 0) printf("(%i, %i, %i)...<%g, %g> =?= <%g>...\n", blockIdx.x, blockIdx.y, blockIdx.z,A*int_rJ3*dz, A*int_iJ3*dz, avEN);
__syncthreads();
// if(p0+q_init+s_init + v_init == 0)
if(p0+q0+s0 == 0)
{
/* printf("%i, %i, %i\t (%g, %g)\n",
blockIdx.x, blockIdx.y, blockIdx.z,
int_rJ3/double(Np*Nq_max*Ns_max*N)*(par->L),
int_iJ3/double(Np*Nq_max*Ns_max*N)*(par->L)) ;*/
par->avEN[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = avEN;
par->int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3;
par->int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3;
par->Wmax[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = Wmax;
par->Wmin[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = Wmin;
}
}
__global__ void
__launch_bounds__ (512, 2)
MotionEquationMultiplierDoubleScheme(PAR par, double Lstop, int Nharm, double A, double2 A2, double2 B)//Fixed Structure
{
unsigned int p0 = threadIdx.x;
unsigned int q0 = threadIdx.y;
unsigned int s0 = threadIdx.z;
unsigned int Np = blockDim.x;
unsigned int Nq = blockDim.y;
unsigned int Ns = blockDim.z;
unsigned int q_init = Nq*blockIdx.x + q0;
unsigned int s_init = Ns*blockIdx.y + s0;
unsigned int v_init = blockIdx.z;
unsigned int Nq_max = Nq*gridDim.x;
unsigned int Ns_max = Ns*gridDim.y;
unsigned int Nv_max = gridDim.z;
// printf("Thread %i/%i, %i/%i started; ", q0, Nq, p0, Np);
double la1, la2, lb, ld1, ld2, h, k1, voltage, g1, g3;
double fA2, rA2, iA2;
__shared__ double avEN, int_rJ3, int_iJ3;
// printf("Step0; ");
int N;
double dz;
N = par.Nz;
la1 = par.la1; la2 = par.la2; lb = par.lb;
ld1 = par.ld1; ld2 = par.ld2;
h = par.h; k1 = par.k1;
g1 = par.g1; g3 = par.g3;
voltage = par.voltage;
double la_tot = la1 + la2 + ld1;
double ifnotdestroyed = 1;
double *rJ3 = par.rJ3;
double *iJ3 = par.iJ3;
// double *int_rJ3 = par.int_rJ3;
// double *int_iJ3 = par.int_iJ3;
dz = par.L/(double)N;
double z;
int ifinal = floor(Lstop/dz);
double Q, Qk1, Qk2, Qk3, Qk4;
double W, Wk1, Wk2, Wk3, Wk4;
double fB;
double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle;
double wall = par.wall, r;
R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max);
initial_angle = (0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center);
R_cyclotron = 0.568*initial_angle + 0.035156*((double)v_init)/double(Nv_max);
kappa_cyclotron = 1.758;
phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;
double en0 = 1. + voltage/511.;
en0 -= 0.5*initial_angle*initial_angle*(en0*en0 - 1)*en0;
Q = 2.*dm_Pi/double(Np)*double(p0);// + 1./(double)Nq*((double)q0 + (double)s0/(double)Ns));
W = 0;
__shared__ double shQ[NS][NQ][NP];
__shared__ double shW[NS][NQ][NP];
__shared__ double d2_rJ3[NQ][NP];
__shared__ double d2_iJ3[NQ][NP];
__shared__ double d1_rJ3[NP];
__shared__ double d1_iJ3[NP];
double PH, EN, cosPH, sinPH, cosPS, sinPS, rA, rB, iB;
// printf("Step3; ");
double H = h;//+dh(delta);
if(p0+q_init+s_init + v_init == 0)
{
rJ3[0] = 0;
iJ3[0] = 0;
// printf("init:\t%i\t%i\t%i\t%i...........\n",p0, q_init,s_init,v_init);
}
if(p0+q0+s0== 0)
{
par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
int_rJ3 = 0;
int_iJ3 = 0;
avEN = 0;
}
int i = 0;
for(i = 1; i < N; i++)
{
/////////////////
z = (double)i*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q;
EN = W + en0;
rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0);
fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0);
rA2 = A2.x*fA2;
iA2 = A2.y*fA2;
fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0);
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk1 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk1 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+0.5)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q + 0.5*Qk1;
EN = W + 0.5*Wk1 + en0;
rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0);
fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0);
rA2 = A2.x*fA2;
iA2 = A2.y*fA2;
fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0);
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk2 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk2 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
PH = Q + 0.5*Qk2;
EN = W + 0.5*Wk2 + en0;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk3 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk3 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+1)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q + Qk3;
EN = W + Wk3 + en0;
rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0);
fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0);
rA2 = A2.x*fA2;
iA2 = A2.y*fA2;
fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0);
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk4= dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk4= -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
///////////////
Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4);
W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4);
// printf("#< %i -> (%i, %i, %i)>\t", i, s0, q0, p0);
// printf("#<%i>", Np_Q*Nq*(Ns*i+s0) + Np_Q*q0 + p0);
// if(q0+p0+s0 == 0) printf("%i", i);
shQ[s0][q0][p0] = Q;
shW[s0][q0][p0] = W;
__threadfence();
__syncthreads();
//////// averaging of the Nharm-th harmonic
if(s0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Ns; ii++)
{
PH = shQ[ii][q0][p0];
sincos((double)Nharm*PH, &sinPS, &cosPS);
tmp_rJ3 += cosPS;
tmp_iJ3 -= sinPS;
}
d2_rJ3[q0][p0] = tmp_rJ3;
d2_iJ3[q0][p0] = tmp_iJ3;
}
__threadfence();
__syncthreads();
if(s0 + q0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Nq; ii++)
{
tmp_rJ3 += d2_rJ3[ii][p0];
tmp_iJ3 += d2_iJ3[ii][p0];
}
d1_rJ3[p0] = tmp_rJ3;
d1_iJ3[p0] = tmp_iJ3;
}
__threadfence();
__syncthreads();
if(p0 + q0 +s0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Np; ii++)
{
tmp_rJ3 += d1_rJ3[ii];
tmp_iJ3 += d1_iJ3[ii];
}
rJ3[i] = tmp_rJ3;
iJ3[i] = tmp_iJ3;
int_rJ3 += tmp_rJ3*((Nharm == 3)?fB:fA2);
int_iJ3 += tmp_iJ3*((Nharm == 3)?fB:fA2);
}
__threadfence();
__syncthreads();
//////////////////// end of Nharm-th harmonic averaging
// if((q0+p0 == 0)&&(s0 == 0)) printf("%i\t%g\t%g\n", q0, PH, EN);
// if(q0+p0+s0 == 0) printf("....%i\t", i);
/////////////////////// energy averaging
if(i == ifinal)
{
if(s0 == 0)
{
double tmp_W = 0;
for(int ii = 0; ii < Ns; ii++)
{
EN = shW[ii][q0][p0];
tmp_W += EN;
}
d2_rJ3[q0][p0] = tmp_W; // the s-averaged energy accumulated above, not this thread's own W
}
__threadfence();
__syncthreads();
if(s0 + q0 == 0)
{
double tmp_rJ3 = 0;
for(int ii = 0; ii < Nq; ii++)
tmp_rJ3 += d2_rJ3[ii][p0];
d1_rJ3[p0] = tmp_rJ3;
}
__threadfence();
__syncthreads();
if(p0 + q0 +s0 == 0)
{
double tmp_rJ3 = 0;
for(int ii = 0; ii < Np; ii++)
tmp_rJ3 += d1_rJ3[ii];
(avEN) += tmp_rJ3;
}
}
///////////////// end of energy averaging
__threadfence();
__syncthreads();
if(i > ifinal) break;
}
__syncthreads();
if(p0+q0+s0 == 0)
{
*par.avEN = avEN;
par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3;
par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3;
}
}
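// MotionEquationMultiplierMultiModes: variant of the pusher in which the
// input-cavity field is a superposition of Na longitudinal modes with complex
// amplitudes par.Amps (evaluated by funcA); the output-cavity drive B is
// currently forced to zero (rB = iB = 0 below).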
__global__ void
__launch_bounds__ (512, 2)
MotionEquationMultiplierMultiModes(PAR par, double Lstop, int Nharm, int Na, double2 B)//Fixed Structure
{
unsigned int p0 = threadIdx.x;
unsigned int q0 = threadIdx.y;
unsigned int s0 = threadIdx.z;
unsigned int Np = blockDim.x;
unsigned int Nq = blockDim.y;
unsigned int Ns = blockDim.z;
unsigned int q_init = Nq*blockIdx.x + q0;
unsigned int s_init = Ns*blockIdx.y + s0;
unsigned int v_init = blockIdx.z;
unsigned int Nq_max = Nq*gridDim.x;
unsigned int Ns_max = Ns*gridDim.y;
// unsigned int Nv_max = gridDim.z;
int warpsize = Np*Nq*Ns;
int log2warpsize = round(log2((double)warpsize));
double la, lb, ld, h, k1, voltage, g1, g3;
double rA1, iA1;
__shared__ double avEN, int_rJ3, int_iJ3, int_rJ3_1, int_iJ3_1;
int N;
double dz;
N = par.Nz;
la = par.la1; lb = par.lb;
ld = par.ld;
h = par.h; k1 = par.k1;
g1 = par.g1; g3 = par.g3;
voltage = par.voltage;
double ifnotdestroyed = 1;
double *rJ3 = par.rJ3;
double *iJ3 = par.iJ3;
double2 *Amps = (double2 *)par.Amps;
// double *int_rJ3 = par.int_rJ3;
// double *int_iJ3 = par.int_iJ3;
dz = par.L/(double)N;
double z;
int ifinal = floor(Lstop/dz);
double Q, Qk1, Qk2, Qk3, Qk4;
double W, Wk1, Wk2, Wk3, Wk4;
double fB;
double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle;
double wall = par.wall, r;
R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max);
initial_angle = 0;//(0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center);
R_cyclotron = 0;//0.568*initial_angle + 0.035156*((double)v_init)/double(Nv_max);
kappa_cyclotron = 1.758;
phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;
double en0 = 1. + voltage/511.;
en0 -= 0.5*initial_angle*initial_angle*(en0*en0 - 1)*en0;
double beta0 = sqrt(en0*en0 - 1)/en0;
// double Delta = k1*dm_Pi/(la*beta0) ;// \delta f / f = (k_0 \pi /L)/beta_ph
Q = 2.*dm_Pi/double(Np)*double(p0);// + 1./(double)Nq*((double)q0 + (double)s0/(double)Ns));
W = 0;
__shared__ double2 shAmps[NP];
__shared__ double sh_sinQ[NS*NQ*NP];
__shared__ double sh_cosQ[NS*NQ*NP];
double PH, EN, cosPH, sinPH, cosPS, sinPS, rB, iB;
double H = h;//+dh(delta);
if(p0+q_init+s_init + v_init == 0)
{
rJ3[0] = 0;
iJ3[0] = 0;
}
if(p0+q0+s0== 0)
{
par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par.int_rJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par.int_iJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
int_rJ3 = 0;
int_iJ3 = 0;
int_rJ3_1 = 0;
int_iJ3_1 = 0;
avEN = 0;
}
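// Stage the Na complex mode amplitudes into shared memory, one thread per
// amplitude; shAmps is sized NP, so this assumes Na <= NP.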
if((q0 + s0 == 0)&&(p0 < Na))
{
shAmps[p0] = Amps[p0];
}
__syncthreads();
int i = 0;
for(i = 1; i < N; i++)
{
/////////////////
z = (double)i*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= 1;//(r > -wall)? 1. : 0.;
PH = Q;
EN = W + en0;
funcA(dm_Pi/la*z, &rA1, &iA1, shAmps, Na);
if(z > la) {rA1 =0; iA1 = 0;}
rA1 *= exp(-g1*r); iA1 *= exp(-g1*r);
fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0);
rB = 0;//B.x*fB;
iB = 0;//B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk1 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk1 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
// if(s0 + p0 + q0 == 0 && (i == 1)) printf("%g,%g,%g,%g\n", r, g1, Qk1, Wk1);
/////////////////
z = ((double)i+0.5)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
// ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q + 0.5*Qk1;
EN = W + 0.5*Wk1 + en0;
if(z > la) {rA1 =0; iA1 = 0;}
fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0);
rB = 0;//B.x*fB;
iB = 0;//B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk2 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk2 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
PH = Q + 0.5*Qk2;
EN = W + 0.5*Wk2 + en0;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk3 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk3 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+1)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
// ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q + Qk3;
EN = W + Wk3 + en0;
if(z > la) {rA1 =0; iA1 = 0;}
fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0);
rB = 0;//B.x*fB;
iB = 0;//B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk4= dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk4= -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
///////////////
Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4);
W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4);
__syncthreads();
sincos(double(Nharm)*Q, &sinPH, &cosPH);
if(Nharm == 1)
fB = exp(-g1*r); //fB is used as a weight factor when integrating the current along the longitudinal coordinate. WARNING!! fB depends on q0, s0
else
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r);
fB *= ifnotdestroyed;
int xi = p0 + Np*q0 + Np*Nq*s0;
int X = blockIdx.x + gridDim.x*blockIdx.y + gridDim.y*gridDim.x*blockIdx.z;
sh_sinQ[xi] = sinPH*fB;
sh_cosQ[xi] = cosPH*fB;
__syncthreads();
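// One tree reduction for both moments at once: biReduce folds sh_sinQ with
// the lower half of the threads and sh_cosQ with the upper half, leaving the
// block-wide sums in element 0 of each array.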
biReduce(sh_sinQ, sh_cosQ, xi, warpsize, log2warpsize);
if(xi == 0)
{
rJ3[X*N+i] = sh_cosQ[0];
iJ3[X*N+i] = -sh_sinQ[0];
int_rJ3 += sh_cosQ[0];
int_iJ3 += -sh_sinQ[0];
}
/////////////////////// energy averaging
if(i == ifinal)
{
sh_sinQ[xi] = W;
__syncthreads();
biReduce(sh_sinQ, sh_cosQ, xi, warpsize, log2warpsize);
if(xi == 0)
{
avEN = sh_sinQ[0];
}
__syncthreads();
}
///////////////// end of energy averaging
__threadfence();
__syncthreads();
if(i > ifinal) break;
}
__syncthreads();
if(p0+q0+s0 == 0)
{
*par.avEN = avEN;
par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3;
par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3;
par.int_rJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3_1;
par.int_iJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3_1;
}
}
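// Each kernel above writes one partial result per block into flat arrays
// (avEN, int_rJ3, int_iJ3, ...). A minimal sketch of that indexing, assuming
// a 3D grid flattened x-fastest; blockOutputIndex is a hypothetical helper
// added purely for illustration and is not used anywhere in this file:
static inline int blockOutputIndex(int bx, int by, int bz, int gx, int gy)
{
// same expression as gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y + blockIdx.x in the kernels
return gx*gy*bz + gx*by + bx;
}
// A grid of (GQ, GS, GV) blocks therefore fills slots 0 .. GQ*GS*GV-1, which
// is exactly the range the retrive* helpers below sum over.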
std::complex<double> Multiplier::retriveBCurr()
{
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
double t_deltaEn[512]; double t_deltaEn2[512];
double reJ = 0, imJ = 0;
// printf("memcpy: %i\t", hipMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), hipMemcpyDeviceToHost));
// printf("memcpy: %i\n", hipMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), hipMemcpyDeviceToHost));
hipMemcpy((void *) t_deltaEn, d_int_rJ3, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
hipMemcpy((void *) t_deltaEn2, d_int_iJ3, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
for(int i = 0; i < GQ*GS*GV; i++){
reJ += t_deltaEn[i]; imJ += t_deltaEn2[i];
}
double coeff = Lsolver/double(Nz*Np*Nq*Ns*Nv);
// printf("re = %g, im = %g\n", reJ*coeff, imJ*coeff);
std::complex<double> res = std::complex<double> (reJ*coeff, imJ*coeff);
return res;
}
void Multiplier::retriveBCurr(std::complex<double> *J1, std::complex<double> *J2)
{
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
double t_Jre[512]; double t_Jim[512];
double t_J2re[512]; double t_J2im[512];
double reJ = 0, imJ = 0;
double re2J = 0, im2J = 0;
// printf("memcpy: %i\t", hipMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), hipMemcpyDeviceToHost));
// printf("memcpy: %i\n", hipMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), hipMemcpyDeviceToHost));
hipMemcpy((void *) t_Jre, d_int_rJ3, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
hipMemcpy((void *) t_Jim, d_int_iJ3, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
hipMemcpy((void *) t_J2re, d_int_rJ3_1, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
hipMemcpy((void *) t_J2im, d_int_iJ3_1, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
for(int i = 0; i < GQ*GS*GV; i++){
reJ += t_Jre[i]; imJ += t_Jim[i];
re2J += t_J2re[i]; im2J += t_J2im[i];
}
double coeff = Lsolver/double(Nz*Np*Nq*Ns*Nv);
// printf("re = %g, im = %g\n", reJ*coeff, imJ*coeff);
std::complex<double> res1 = std::complex<double> (reJ*coeff, imJ*coeff);
std::complex<double> res2 = std::complex<double> (re2J*coeff, im2J*coeff);
*J1 = res1; *J2 = res2;
// printf("J1 = %g, %g\tJ2 = %g, %g\n", *J1, *J2);
}
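// Averages the final energy deviation W over all particles and extracts the
// extremal per-block deviations Wmax/Wmin gathered by the kernel.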
double Multiplier::retriveDeltaEnergy()
{
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
double t_deltaEn[512];
double t_wmax[512];
double t_wmin[512];
double averagedEn = 0, wmax = -99999, wmin = 99999;
hipMemcpy( t_deltaEn, d_avEN, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
hipMemcpy( t_wmax, d_Wmax, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
hipMemcpy( t_wmin, d_Wmin, sizeof(double)*GQ*GS*GV, hipMemcpyDeviceToHost);
for(int i = 0; i < GQ*GS*GV; i++)
{
wmax =(wmax > t_wmax[i]) ? wmax : t_wmax[i];
wmin =(wmin < t_wmin[i]) ? wmin : t_wmin[i];
averagedEn += t_deltaEn[i];
// printf("%g\n", t_deltaEn[i]/double(NP*NQ*NS));
}
double coeff = 1./double(Np*Nq*Ns*Nv);
// printf("deltaW + = %g \t deltaW - = %g\n", wmax*511000., wmin*511000.);
return averagedEn*coeff;
}
bool Device::initSolver(int nz, double lsolver, double groupSpeedCoeff, char *_solverName)
{
Nz = nz;
Lsolver = lsolver;
Lmax = lsolver;
solverName = _solverName;
Nmax = nz;
MultiplierGroupSpeedCoefficient = groupSpeedCoeff;
printf("The %s solver is intialized\n", solverName);
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
// printf(" Nq %i, Ns %i, Nv %i \t GQ %i, GS %i, GV %i \n",Nq, Ns, Nv, GQ, GS, GV);
printf("Nz, Lsolver, grSpeed, %i, %g, %g\n", Nz, Lsolver,MultiplierGroupSpeedCoefficient);
gpuErrChk(hipMalloc((void**)&d_rJ3, Nz*GQ*GS*GV*sizeof(double)));
gpuErrChk(hipMalloc((void**)&d_iJ3, Nz*GQ*GS*GV*sizeof(double)));
gpuErrChk(hipMalloc((void**)&d_Nz, sizeof(int)));
gpuErrChk(hipMalloc((void**)&d_Lsolver, sizeof(double)));
gpuErrChk(hipMalloc((void**)&d_avEN, sizeof(double)*GQ*GS*GV));
gpuErrChk(hipMalloc((void**)&d_int_rJ3_1, sizeof(double)*GQ*GS*GV));
gpuErrChk(hipMalloc((void**)&d_int_iJ3_1, sizeof(double)*GQ*GS*GV));
gpuErrChk(hipMalloc((void**)&d_Amps, sizeof(cplx) * 30));
if(strcmp(solverName,"multiplier_spcharge_2d") != 0)
{
gpuErrChk(hipMalloc((void**)&d_int_rJ3, sizeof(double)*GQ*GS*GV));
gpuErrChk(hipMalloc((void**)&d_int_iJ3, sizeof(double)*GQ*GS*GV));
}
gpuErrChk(hipMalloc((void**)&d_Wmax, sizeof(double)*GQ*GS*GV));
gpuErrChk(hipMalloc((void**)&d_Wmin, sizeof(double)*GQ*GS*GV));
gpuErrChk(hipMalloc((void**)&d_par, sizeof(PAR)));
gpuErrChk(hipMalloc((void**)&grSpeedCoeff, sizeof(double)));
gpuErrChk(hipMemcpy((void*)d_Nz, &Nz, sizeof(int), hipMemcpyHostToDevice));
gpuErrChk(hipMemcpy((void*)&grSpeedCoeff, &MultiplierGroupSpeedCoefficient, sizeof(double), hipMemcpyHostToDevice)); // TODO Here is a bug
gpuErrChk(hipMemcpy((void*)d_Lsolver, (void*)&Lsolver, sizeof(double), hipMemcpyHostToDevice));
return 1;
}
void Device::releaseDeviceMemory()
{
hipFree((void*)d_Nz);
hipFree((void*)d_Lsolver);
hipFree((void*)d_avEN);
hipFree((void*)d_int_rJ3);
hipFree((void*)d_int_iJ3);
if(fieldLoaded)
{
hipFree((void*) d_tAr);
hipFree((void*) d_tAi);
}
}
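// Host-side drivers: each method below fills a PAR block, copies it to the
// device, launches the matching kernel on a grid of (Nq/NQ, Ns/NS, Nv) blocks
// of (NP, NQ, NS) threads, and gathers the per-block partial sums with the
// retrive* helpers.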
double Multiplier::DeltaEnergy(double A)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La = period*double(Nperiods);
par.la = La; par.lb = Lb; par.ld = Ld; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 zero = {0,0};
// hipMemcpy( d_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy( d_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy(d_int_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy(d_int_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy( d_avEN, &dzero, sizeof(double), hipMemcpyHostToDevice);
dim3 threadsPerBlock(NP, NQ, NS);
hipMemcpy(d_par, &par, sizeof(PAR), hipMemcpyHostToDevice);
hipLaunchKernelGGL(MotionEquationMultiplier, dim3((size_t) Nq/NQ, (size_t) Ns/NS, (size_t) Nv), threadsPerBlock, 0, 0, d_par, La, 1, A, zero);
/* double *debRe = new double [Nz];
double *debIm = new double [Nz];
hipError_t copy1 = hipMemcpy((void*) debRe, (void *)dm_rJq, sizeof(double)*Nz, hipMemcpyDeviceToHost);
printf("copy1 = %i \n", copy1);
hipError_t copy2 = hipMemcpy((void*) debIm, (void *)dm_iJq, sizeof(double)*Nz, hipMemcpyDeviceToHost);
printf("copy2 = %i \n", copy2);
*/
//printf("memcpy: %i \n", hipMemcpy((void*) &t_deltaEn, d_avEN, sizeof(double), hipMemcpyDeviceToHost));
//printf("Energy delta = %g \n", t_deltaEn/double(NP*NQ*NS));
double res = retriveDeltaEnergy();
// printf("Retrieve returned: %g \n", res);
return res;
// delete[] debRe; delete[] debIm;
}
std::complex<double> Multiplier::CurrentB(double reB, double imB, double A)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La = period*double(Nperiods);
par.la = La; par.lb = Lb; par.ld = Ld; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
// printf("CurrentB: %g, %g, %g \n", La, Ld, Lb);
hipMemset(d_rJ3, 0, sizeof(double)*Nz);
hipMemset(d_iJ3, 0, sizeof(double)*Nz);
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 B; B.x = reB; B.y = imB;
// printf("\n B loop: %g\n", La+Ld+Lb );
// printf("\n Threads: %i, %i, %i\n", threadsPerBlock.x, threadsPerBlock.y, threadsPerBlock.z );
dim3 numblocks(Nq/NQ, Ns/NS, Nv);
dim3 threadsPerBlock(NP, NQ, NS);
// hipMemcpy( d_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy( d_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy(d_int_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy(d_int_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy( d_avEN, &dzero, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_par, &par, sizeof(PAR), hipMemcpyHostToDevice);
MotionEquationMultiplier << <numblocks, threadsPerBlock >> >(d_par, La + Ld + Lb, 3, A, B);
double *jr = new double [Nz];
double *ji = new double [Nz];
hipMemcpy(jr, d_rJ3, sizeof(double)*Nz, hipMemcpyDeviceToHost);
hipMemcpy(ji, d_iJ3, sizeof(double)*Nz, hipMemcpyDeviceToHost);
FILE *resamp_ar = fopen("F:\\Piotr\\bwo_Data\\mdebug_jr.csv", "w");
FILE *resamp_ai = fopen("F:\\Piotr\\bwo_Data\\mdebug_ji.csv", "w");
for(int j = 0; j < Nz; j++)
{
fprintf(resamp_ar, "%i,%g\n", j, jr[j]);
fprintf(resamp_ai, "%i,%g\n", j, ji[j]);
}
fclose(resamp_ar);
fclose(resamp_ai);
delete []jr;
delete []ji;
return retriveBCurr();
}
std::complex<double> MultiplierThreeCavity::CurrentB2(double reB, double imB, double A, cplx A2)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La1 = period*(double)Nperiods;
// printf("CurrentB2: %g, %g, %g, %g, %g \n", La1, Ld1, La2, Ld2, Lb);
par.la1 = La1; par.lb = Lb; par.ld1 = Ld1; par.k1 = k1; par.h = h; par.voltage = voltage;
par.la2 = La2; par.ld2 = Ld2;
par.Nz = Nz; par.L = Lsolver;
par.wall = wall; par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 B; B.x = reB; B.y = imB;
double2 Astat2 ={A2.real(), A2.imag()};
dim3 numblocks(Nq/NQ, Ns/NS, Nv);
dim3 threadsPerBlock(NP, NQ, NS);
MotionEquationMultiplierDoubleScheme << <numblocks, threadsPerBlock >> >(par, La1 + Ld1 + La2 + Ld2 + Lb, 3, A, Astat2, B);
return retriveBCurr();
}
std::complex<double> Multiplier::CurrentA(double reA, double imA)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La = period*double(Nperiods);
par.la = La; par.lb = 1.; par.ld = 1.; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 zero = {0,0};
dim3 threadsPerBlock(NP, NQ, NS);
dim3 numblocks(Nq / NQ, Ns / NS, Nv);
double A; A = sqrt(reA*reA + imA*imA);
// printf("\n B loop: %g\n", La+Ld+Lb );
// printf("\n Threads: %i, %i, %i\n", threadsPerBlock.x, threadsPerBlock.y, threadsPerBlock.z );
// hipMemcpy( d_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy( d_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy(d_int_rJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy(d_int_iJ3, &dzero, sizeof(double), hipMemcpyHostToDevice);
// hipMemcpy( d_avEN, &dzero, sizeof(double), hipMemcpyHostToDevice);
gpuErrChk(hipMemcpy(d_par, &par, sizeof(PAR), hipMemcpyHostToDevice));
MotionEquationMultiplier << <numblocks, threadsPerBlock >> >(d_par, La, 1, A, zero);
return retriveBCurr()*exp(I*arg(reA + I*imA));
}
std::complex<double> MultiplierThreeCavity::CurrentA2(double A1, double reA, double imA)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La1 = period*double(Nperiods);
par.la1 = La1; par.la2 = La2; par.ld1 = Ld1;
par.lb = 1.; par.ld = 1.; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 zero = {0,0};
double2 A = {reA, imA};
dim3 threadsPerBlock(NP, NQ, NS);
dim3 numblocks(Nq/NQ, Ns/NS, Nv);
hipLaunchKernelGGL(( MotionEquationMultiplierDoubleScheme) , dim3(numblocks), dim3(threadsPerBlock) , 0, 0, par, La1 + La2 + Ld1, 1, A1, A, zero);
return retriveBCurr();
}
void MultiplierMultiModes::CurrentAMultiModes(std::complex<double> *Amps, std::complex<double> * currs, double *buffRe, double *buffIm, int Na, cplx *J1, cplx *J2)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La = period*double(Nperiods);
int Nstop = La/dz;
par.la1 = La; par.ld = Ld;
par.lb = 1.; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
par.int_rJ3_1 = d_int_rJ3_1;
par.int_iJ3_1 = d_int_iJ3_1;
double2 zero = {0,0};
dim3 threadsPerBlock(NP, NQ, NS);
dim3 numblocks(Nq/NQ, Ns/NS, Nv);
par.Amps = (double2*) d_Amps;
gpuErrChk(hipMemcpy(d_Amps, (void*) Amps, sizeof(double2)*Na, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( MotionEquationMultiplierMultiModes) , dim3(numblocks), dim3(threadsPerBlock) , 0, 0, par, La, 1, Na, zero);
gpuErrChk(hipPeekAtLastError());
retriveACurrComplex((std::complex<double>*)Amps, currs, buffRe, buffIm, Namm, Nstop);
}
void MultiplierMultiModes::retriveACurrComplex(std::complex<double> *Amps, std::complex<double> *currs, double *currsBuffRe, double *currsBuffIm, int Na, int Nstop)
{
int GQ = Nq / NQ; int GS = Ns / NS; int GV = Nv;
double reJ = 0, imJ = 0;
double rF, iF, z;
double La = period*double(Nperiods);
std::complex<double> J;
// printf("memcpy: %i\t", hipMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), hipMemcpyDeviceToHost));
// printf("memcpy: %i\n", hipMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), hipMemcpyDeviceToHost));
gpuErrChk(hipMemcpy((void *)currsBuffRe, d_rJ3, sizeof(double)*GQ*GS*GV*Nmax, hipMemcpyDeviceToHost))
gpuErrChk(hipMemcpy((void *)currsBuffIm, d_iJ3, sizeof(double)*GQ*GS*GV*Nmax, hipMemcpyDeviceToHost))
for (int a = 0; a < Na; a++)
{
currs[a] = 0;
}
// FILE* debugfile = fopen("F:\\Piotr\\CalcData\\mm_orotron_Data\\debug.txt", "w");
for (int j = 0; j < Nstop; j++)
{
reJ = 0; imJ = 0;
for (int i = 0; i < GQ*GS*GV; i++)
{
reJ += currsBuffRe[i*Nmax + j]; imJ += currsBuffIm[i*Nmax + j];
}
for (int a = 0; a < Na; a++)
{
z = (double)j * dz;
sincos(Pi / La*z*double(a - Na / 2), &iF, &rF);
J = cplx(reJ, imJ)*cplx(rF, -iF);
currs[a] += (J);
// if(a == 1) fprintf(debugfile, "%g,%g,%g,%g,%g\n",z, real(J)/double(Np*Nq*Ns*Nv), imag(J)/double(Np*Nq*Ns*Nv), abs(J)/double(Np*Nq*Ns*Nv), arg(J) );
}
}
double coeff = Lsolver / double(Nz*Np*Nq*Ns*Nv);
for (int a = 0; a < Na; a++) currs[a] *= coeff;
// fclose(debugfile);
}
//////////////////////////////////
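// setPar assembles the ParamsM block shared by the time-domain solvers and
// resets the per-particle "alive" flags (d_ifnotdestroyed) to 1.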
ParamsM Device::setPar()
{
ParamsM par;
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
int gridsize = GQ*GS*GV;
double La = Nperiods*period;
double h = 2.*Pi/period;
par.la = La; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nmax; par.L = Lmax; par.wall = wall;
par.g1 = g1; par.Ngrid = gridsize;
par.ar0 = d_ar0; par.ai0 = d_ai0;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.delta = 0;
par.Q0 = d_Q0; par.W0 = d_W0;
par.rAk = d_rAk; par.iAk = d_iAk;
par.rAq1k = d_rAq1k; par.iAq1k = d_iAq1k;
par.Qk = d_Qk; par.Wk = d_Wk;
par.ar0_t = d_ar0_t; par.ai0_t = d_ai0_t;
par.int_rQ1 = d_int_rQ1;
par.int_iQ1 = d_int_iQ1;
par.ifnotdestroyed = d_ifnotdestroyed;
par.g3 = g3;
par.rAq1 =d_rAq1;
par.iAq1 =d_iAq1;
par.radii = d_radii;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
par.int_rJ3_1 = d_int_rJ3_1;
par.int_iJ3_1 = d_int_iJ3_1;
par.avEN = d_avEN;
int *mass = new int [Np*Nq*Ns*Nv];
for(int a = 0; a < Np*Nq*Ns*Nv; a++) mass[a] = 1;
gpuErrChk(hipMemcpy(d_ifnotdestroyed, mass, sizeof(int)*Np*Nq*Ns*Nv, hipMemcpyHostToDevice));
delete [] mass;
return par;
} | multiplier.cu | #include <stdio.h>
#include <complex>
#include "multiplier.h"
#include "multiplier_three_cavity.h"
#include "multiplier_multimode.h"
#include "cu_mult.h"
//#include "print.h"
#define PAR ParamsM
#ifndef dm_Pi
__device__ const double dm_Pi = 3.141592653589793;
#endif
int Nz = 0;
double Lsolver = 0;
double MultiplierGroupSpeedCoefficient = 0;
__device__ double grSpeedCoeff;
int *d_Nz;
double *d_Lsolver;
//cudaPitchedPtr d2_rJ3, d2_iJ3, d2_int_rJ3, d2_int_iJ3, d2_W;
//cudaPitchedPtr d1_rJ3, d1_iJ3, d1_int_rJ3, d1_int_iJ3, d1_W;
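// Tree reduction of two shared arrays at once: at every stride the lower half
// of the participating threads folds A while the upper half folds B, so after
// logsize rounds A[0] and B[0] hold the sums of the first datasize elements.
// Assumes datasize is a power of two and that all datasize threads reach the
// call together (it contains __syncthreads()).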
__device__ void biReduce(double *A, double *B, int p0, int datasize, int logsize)
{
int stride = datasize;
for (int q = 1; q <= logsize; q++)
{
stride = stride >> 1;
if (p0 < stride)
{
A[p0] += A[p0 + stride];
}
else
{
if (p0 < 2 * stride)
{
B[p0 - stride] += B[p0];
}
}
__syncthreads();
}
}
__device__ double dh( double delta)
{
return delta*delta*(grSpeedCoeff);
}
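// funcA evaluates the complex mode superposition at phase z:
// A(z) = sum_i Amps[i] * exp(j*(i - Na/2)*z); sincos returns iF = sin(arg)
// and rF = cos(arg), so (*rA, *iA) is the real/imaginary part of the sum.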
__device__ void funcA (double z, double *rA, double *iA, double2 *Amps, int Na)
{
*rA = 0;
*iA = 0;
double rF, iF;
for(int i = 0; i < Na; i++)
{
sincos(z*double(i - Na/2), &iF, &rF);
*rA += Amps[i].x*rF - Amps[i].y*iF;
*iA += Amps[i].x*iF + Amps[i].y*rF;
}
}
__global__ void
__launch_bounds__ (512, 2)
MotionEquationMultiplier(PAR *par, double Lstop, int Nharm, double A, double2 B)//Fixed Structure
{
unsigned int p0 = threadIdx.x; unsigned int Np = blockDim.x;
unsigned int q0 = threadIdx.y; unsigned int Nq = blockDim.y;
unsigned int s0 = threadIdx.z; unsigned int Ns = blockDim.z;
unsigned int q_init = Nq*blockIdx.x + q0; unsigned int Nq_max = Nq*gridDim.x;
unsigned int s_init = Ns*blockIdx.y + s0; unsigned int Ns_max = Ns*gridDim.y;
unsigned int v_init = blockIdx.z; unsigned int Nv_max = gridDim.z;
int warpsize = Np*Nq*Ns;
int log2warpsize = round(log2((double)warpsize));
int X = blockIdx.x + gridDim.x*blockIdx.y + gridDim.y*gridDim.x*blockIdx.z;
double la, lb, ld, h, k1, voltage, g1, g3;
__shared__ double avEN, int_rJ3, int_iJ3;
int N;
double dz;
N = par->Nz;
la = par->la; lb = par->lb; ld = par->ld;
h = par->h; k1 = par->k1;
g1 = par->g1; g3 = par->g3;
voltage = par->voltage;
double *rJ3 = par->rJ3;
double *iJ3 = par->iJ3;
dz = par->L/(double)N;
double z;
int ifinal = floor(Lstop/dz);
double Q, Qk1, Qk2, Qk3, Qk4;
double W, Wk1, Wk2, Wk3, Wk4;
double fA, fB, rA, r;
double Wmax, Wmin;
double ifnotdestroyed = 1;
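// Multiplicative "alive" flag: once the particle's radius r reaches the wall
// (r <= -wall) the flag becomes 0 and permanently removes the particle from
// the force and current sums.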
double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle, angle_spread_factor;
double wall = par->wall;
R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max);
initial_angle = 0;//(0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center);
R_cyclotron = (0.568*initial_angle + 0*0.035156*((double)v_init)/double(Nv_max));
kappa_cyclotron = 1.758;
phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;
/*R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max);
initial_angle = 0.087*((double)v_init - 0.5*Nv_max)/double(Nv_max);
R_cyclotron = 0.744*initial_angle;//for f = 86.6 the coefficient is 0.568; for f = 95.5: 0.744
kappa_cyclotron = 1.344; //for f = 86.6 the coefficient is 1.758; for f = 95.5: 1.344
phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;*/
double en0 = 1. + voltage/511.;
angle_spread_factor = 1./sqrt(1. + initial_angle*initial_angle);
// Causes an invalid operation for sufficiently large wall values, probably because of the steepness of the parabola in initial_angle
Q = 2.*dm_Pi/double(Np)*double(p0);
W = 0;
__shared__ double sh_sinQ[NS*NQ*NP];
__shared__ double sh_cosQ[NS*NQ*NP];
/* __shared__ double shQ[NS][NQ][NP];
__shared__ double shW[NS][NQ][NP];
__shared__ double d2_rJ3[NQ][NP];
__shared__ double d2_iJ3[NQ][NP];
__shared__ double d1_rJ3[NP];
__shared__ double d1_iJ3[NP];*/
double PH, EN, cosPH, sinPH, cosPS, sinPS, rB, iB;
double H = h;//+dh(delta);
if(p0+q_init+s_init + v_init == 0)
{
rJ3[0] = 0;
iJ3[0] = 0;
}
if(p0+q0+s0== 0)
{
par->int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par->int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
int_rJ3 = 0;
int_iJ3 = 0;
avEN = 0;
}
// if(s0+p0+q0 == 0) printf("la = %g, ld = %g, lb = %g \n", la, ld, lb);
int i = 0;
for(i = 1; i < N; i++)
{
/////////////////
z = (double)i*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= (r > -wall)? 1. : 0.; ///!!!!!!
// ifnotdestroyed = 1;
PH = Q;
EN = W + en0;
// if((s0+p0+q0 == 0)) printf("%g\t%g\n", z, fB);
fA = ((z<la)?sin(dm_Pi/la*z)* exp(-g1*r):0);
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0);
rA = A*fA;
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk1 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.));
Wk1 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+0.5)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
// ifnotdestroyed *= (r > -wall)? 1. : 0.;
// ifnotdestroyed = 1;
PH = Q + 0.5*Qk1;
EN = W + 0.5*Wk1 + en0;
fA = ((z<la)?sin(dm_Pi/la*z) * exp(-g1*r):0);
fB = ((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0;
rA = A*fA;
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk2 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.));
Wk2 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
PH = Q + 0.5*Qk2;
EN = W + 0.5*Wk2 + en0;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk3 = dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.));
Wk3 = -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+1.)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
// ifnotdestroyed *= (r > -wall)? 1. : 0.;
// ifnotdestroyed = 1;
PH = Q + Qk3;
EN = W + Wk3 + en0;
fA = ((z<la)? sin(dm_Pi/la*z)* exp(-g1*r):0);
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld))*exp(-g3*r):0);
rA = A*fA;
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk4= dz*(H - k1*EN*angle_spread_factor/sqrt(EN*EN-1.));
Wk4= -dz*((rA*cosPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
///////////////
Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4);
W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4);
/* shQ[s0][q0][p0] = Q;
shW[s0][q0][p0] = W;*/
__syncthreads();
sincos(double(Nharm)*Q, &sinPH, &cosPH);
if(Nharm == 1)
fB = ((z<la)?sin(dm_Pi/la*z):0)*exp(-g1*r); //fB используется как множитель при интегрировании тока вдоль продольной координаты ВНИМАНИЕ!! fB зависит от q0, s0
else
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r);
fB *= ifnotdestroyed;
int xi = p0 + Np*q0 + Np*Nq*s0;
sh_sinQ[xi] = sinPH*fB;
sh_cosQ[xi] = cosPH*fB;
unsigned int stride = warpsize;
__syncthreads();
for(int q = 1; q <= log2warpsize; q++)
{
stride = stride >> 1;//roundf(powf(2., q));
if(xi < stride)
{
sh_sinQ[xi] += sh_sinQ[xi + stride];
}
else
{
if(xi < 2*stride)
{
sh_cosQ[xi - stride] += sh_cosQ[xi];
}
}
__syncthreads();
}
// if((i == 1300)) printf("%g\n", Q);
if(xi == 0)
{
rJ3[X*N+i] = sh_cosQ[0];
iJ3[X*N+i] = -sh_sinQ[0];
// if((i == 1300)) printf("\n%g\n", sh_cosQ[0]);
int_rJ3 += sh_cosQ[0];
int_iJ3 += -sh_sinQ[0];
}
/* //////// averaging of the Nharm-th harmonic
if(s0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0, tmpPhCycl = 0;
for(int ii = 0; ii < Ns; ii++)
{
int ii_init = Ns*blockIdx.y + ii;
tmpPhCycl = 2.*dm_Pi*(double)ii_init/(double)Ns_max;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + tmpPhCycl));
if(Nharm == 1)
fB = ((z<la)?sin(dm_Pi/la*z):0)*exp(-g1*r); //fB используется как множитель при интегрировании тока вдоль продольной координаты ВНИМАНИЕ!! fB зависит от q0, s0
else
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r);
fB *= ifnotdestroyed;
PH = shQ[ii][q0][p0];
sincos((double)Nharm*PH, &sinPS, &cosPS);
tmp_rJ3 += cosPS*fB;
tmp_iJ3 -= sinPS*fB;
}
d2_rJ3[q0][p0] = tmp_rJ3;
d2_iJ3[q0][p0] = tmp_iJ3;
}
__threadfence();
__syncthreads();
if(s0 + q0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Nq; ii++)
{
tmp_rJ3 += d2_rJ3[ii][p0];
tmp_iJ3 += d2_iJ3[ii][p0];
}
d1_rJ3[p0] = tmp_rJ3;
d1_iJ3[p0] = tmp_iJ3;
}
__threadfence();
__syncthreads();
if(p0 + q0 +s0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Np; ii++)
{
tmp_rJ3 += d1_rJ3[ii];
tmp_iJ3 += d1_iJ3[ii];
}
rJ3[i] = tmp_rJ3;
iJ3[i] = tmp_iJ3;
int_rJ3 += tmp_rJ3;
int_iJ3 += tmp_iJ3;
// if(i == ifinal) printf("<< %g %g\n", A*int_rJ3, A*tmp_rJ3);
}
__threadfence();
__syncthreads();
//////////////////// end of Nharm-th harmonic averaging
*/
if(i == ifinal)
{
sh_sinQ[xi] = W;
__syncthreads();
stride = warpsize;
for(int q = 1; q <= log2warpsize; q++)
{
stride = stride >> 1;//warpsize/roundf(powf(2., q));
if(xi < stride)
{
sh_sinQ[xi] += sh_sinQ[xi + stride];
}
__syncthreads();
}
if(xi == 0)
{
avEN = sh_sinQ[0];
}
__syncthreads();
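// Reuse both shared buffers for a simultaneous max/min reduction of W: the
// lower half of the threads keeps the running maximum in sh_sinQ while the
// upper half keeps the minimum in sh_cosQ.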
sh_sinQ[xi] = W;
sh_cosQ[xi] = W;
stride = warpsize;
for(int q = 1; q <= log2warpsize; q++)
{
stride = stride >> 1;// stride = warpsize/roundf(powf(2., q));
if(xi < stride)
{
sh_sinQ[xi] = (sh_sinQ[xi] > sh_sinQ[xi + stride]) ? sh_sinQ[xi] : sh_sinQ[xi + stride];
}
else
{
if(xi < 2*stride)
{
sh_cosQ[xi - stride] = (sh_cosQ[xi - stride] < sh_cosQ[xi]) ? sh_cosQ[xi - stride] : sh_cosQ[xi];
}
}
__syncthreads();
}
if(xi == 0)
{
Wmax = sh_sinQ[0];
Wmin = sh_cosQ[0];
}
/*
if(s0 == 0)
{
double tmp_W = 0;
for(int ii = 0; ii < Ns; ii++)
{
EN = shW[ii][q0][p0];
tmp_W += EN;
}
d2_rJ3[q0][p0] = tmp_W;
// if((p0 == 0)) printf(" %g >>, \n", d2_rJ3[q0][p0]);
}
__threadfence();
__syncthreads();
if(s0 + q0 == 0)
{
double tmp_rJ3 = 0;
for(int ii = 0; ii < Nq; ii++)
tmp_rJ3 += d2_rJ3[ii][p0];
d1_rJ3[p0] = tmp_rJ3;
}
__threadfence();
__syncthreads();
if(p0 + q0 +s0 == 0)
{
double tmp_rJ3 = 0;
for(int ii = 0; ii < Np; ii++)
tmp_rJ3 += d1_rJ3[ii];
(avEN) += tmp_rJ3;
}
*/
__syncthreads();
}
__threadfence();
__syncthreads();
if(i > ifinal) break;
}
// printf("END\t");
// if(p0 + s0 == 0) printf("(%i, %i, %i)...<%g, %g> =?= <%g>...\n", blockIdx.x, blockIdx.y, blockIdx.z,A*int_rJ3*dz, A*int_iJ3*dz, avEN);
__syncthreads();
// if(p0+q_init+s_init + v_init == 0)
if(p0+q0+s0 == 0)
{
/* printf("%i, %i, %i\t (%g, %g)\n",
blockIdx.x, blockIdx.y, blockIdx.z,
int_rJ3/double(Np*Nq_max*Ns_max*N)*(par->L),
int_iJ3/double(Np*Nq_max*Ns_max*N)*(par->L)) ;*/
par->avEN[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = avEN;
par->int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3;
par->int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3;
par->Wmax[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = Wmax;
par->Wmin[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = Wmin;
}
}
__global__ void
__launch_bounds__ (512, 2)
MotionEquationMultiplierDoubleScheme(PAR par, double Lstop, int Nharm, double A, double2 A2, double2 B)//Fixed Structure
{
unsigned int p0 = threadIdx.x;
unsigned int q0 = threadIdx.y;
unsigned int s0 = threadIdx.z;
unsigned int Np = blockDim.x;
unsigned int Nq = blockDim.y;
unsigned int Ns = blockDim.z;
unsigned int q_init = Nq*blockIdx.x + q0;
unsigned int s_init = Ns*blockIdx.y + s0;
unsigned int v_init = blockIdx.z;
unsigned int Nq_max = Nq*gridDim.x;
unsigned int Ns_max = Ns*gridDim.y;
unsigned int Nv_max = gridDim.z;
// printf("Thread %i/%i, %i/%i started; ", q0, Nq, p0, Np);
double la1, la2, lb, ld1, ld2, h, k1, voltage, g1, g3;
double fA2, rA2, iA2;
__shared__ double avEN, int_rJ3, int_iJ3;
// printf("Step0; ");
int N;
double dz;
N = par.Nz;
la1 = par.la1; la2 = par.la2; lb = par.lb;
ld1 = par.ld1; ld2 = par.ld2;
h = par.h; k1 = par.k1;
g1 = par.g1; g3 = par.g3;
voltage = par.voltage;
double la_tot = la1 + la2 + ld1;
double ifnotdestroyed = 1;
double *rJ3 = par.rJ3;
double *iJ3 = par.iJ3;
// double *int_rJ3 = par.int_rJ3;
// double *int_iJ3 = par.int_iJ3;
dz = par.L/(double)N;
double z;
int ifinal = floor(Lstop/dz);
double Q, Qk1, Qk2, Qk3, Qk4;
double W, Wk1, Wk2, Wk3, Wk4;
double fB;
double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle;
double wall = par.wall, r;
R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max);
initial_angle = (0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center);
R_cyclotron = 0.568*initial_angle + 0.035156*((double)v_init)/double(Nv_max);
kappa_cyclotron = 1.758;
phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;
double en0 = 1. + voltage/511.;
en0 -= 0.5*initial_angle*initial_angle*(en0*en0 - 1)*en0;
Q = 2.*dm_Pi/double(Np)*double(p0);// + 1./(double)Nq*((double)q0 + (double)s0/(double)Ns));
W = 0;
__shared__ double shQ[NS][NQ][NP];
__shared__ double shW[NS][NQ][NP];
__shared__ double d2_rJ3[NQ][NP];
__shared__ double d2_iJ3[NQ][NP];
__shared__ double d1_rJ3[NP];
__shared__ double d1_iJ3[NP];
double PH, EN, cosPH, sinPH, cosPS, sinPS, rA, rB, iB;
// printf("Step3; ");
double H = h;//+dh(delta);
if(p0+q_init+s_init + v_init == 0)
{
rJ3[0] = 0;
iJ3[0] = 0;
// printf("init:\t%i\t%i\t%i\t%i...........\n",p0, q_init,s_init,v_init);
}
if(p0+q0+s0== 0)
{
par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
int_rJ3 = 0;
int_iJ3 = 0;
avEN = 0;
}
int i = 0;
for(i = 1; i < N; i++)
{
/////////////////
z = (double)i*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q;
EN = W + en0;
rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0);
fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0);
rA2 = A2.x*fA2;
iA2 = A2.y*fA2;
fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0);
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk1 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk1 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+0.5)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q + 0.5*Qk1;
EN = W + 0.5*Wk1 + en0;
rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0);
fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0);
rA2 = A2.x*fA2;
iA2 = A2.y*fA2;
fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0);
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk2 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk2 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
PH = Q + 0.5*Qk2;
EN = W + 0.5*Wk2 + en0;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk3 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk3 = -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+1)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q + Qk3;
EN = W + Wk3 + en0;
rA = A *((z<la1) ? sin(dm_Pi/la1*z) *exp(-g1*r):0);
fA2= ( ((la1+ld1<z)&&(z<la_tot))? sin(dm_Pi/la2*(z-la1 - ld1)) *exp(-g1*r):0);
rA2 = A2.x*fA2;
iA2 = A2.y*fA2;
fB = ( ((z> la_tot+ld2)&&(z < la_tot+ld2+lb))?sin(dm_Pi/lb*(z-la_tot-ld2)) *exp(-g3*r):0);
rB = B.x*fB;
iB = B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk4= dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk4= -dz*((rA*cosPH)+(rA2*cosPH-iA2*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
///////////////
Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4);
W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4);
// printf("#< %i -> (%i, %i, %i)>\t", i, s0, q0, p0);
// printf("#<%i>", Np_Q*Nq*(Ns*i+s0) + Np_Q*q0 + p0);
// if(q0+p0+s0 == 0) printf("%i", i);
shQ[s0][q0][p0] = Q;
shW[s0][q0][p0] = W;
__threadfence();
__syncthreads();
//////// averaging of the Nharm-th harmonic
if(s0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Ns; ii++)
{
PH = shQ[ii][q0][p0];
sincos((double)Nharm*PH, &sinPS, &cosPS);
tmp_rJ3 += cosPS;
tmp_iJ3 -= sinPS;
}
d2_rJ3[q0][p0] = tmp_rJ3;
d2_iJ3[q0][p0] = tmp_iJ3;
}
__threadfence();
__syncthreads();
if(s0 + q0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Nq; ii++)
{
tmp_rJ3 += d2_rJ3[ii][p0];
tmp_iJ3 += d2_iJ3[ii][p0];
}
d1_rJ3[p0] = tmp_rJ3;
d1_iJ3[p0] = tmp_iJ3;
}
__threadfence();
__syncthreads();
if(p0 + q0 +s0 == 0)
{
double tmp_rJ3 = 0, tmp_iJ3 = 0;
for(int ii = 0; ii < Np; ii++)
{
tmp_rJ3 += d1_rJ3[ii];
tmp_iJ3 += d1_iJ3[ii];
}
rJ3[i] = tmp_rJ3;
iJ3[i] = tmp_iJ3;
int_rJ3 += tmp_rJ3*((Nharm == 3)?fB:fA2);
int_iJ3 += tmp_iJ3*((Nharm == 3)?fB:fA2);
}
__threadfence();
__syncthreads();
//////////////////// end of Nharm-th harmonic averaging
// if((q0+p0 == 0)&&(s0 == 0)) printf("%i\t%g\t%g\n", q0, PH, EN);
// if(q0+p0+s0 == 0) printf("....%i\t", i);
/////////////////////// energy averaging
if(i == ifinal)
{
if(s0 == 0)
{
double tmp_W = 0;
for(int ii = 0; ii < Ns; ii++)
{
EN = shW[ii][q0][p0];
tmp_W += EN;
}
d2_rJ3[q0][p0] = tmp_W; // the s-averaged energy accumulated above, not this thread's own W
}
__threadfence();
__syncthreads();
if(s0 + q0 == 0)
{
double tmp_rJ3 = 0;
for(int ii = 0; ii < Nq; ii++)
tmp_rJ3 += d2_rJ3[ii][p0];
d1_rJ3[p0] = tmp_rJ3;
}
__threadfence();
__syncthreads();
if(p0 + q0 +s0 == 0)
{
double tmp_rJ3 = 0;
for(int ii = 0; ii < Np; ii++)
tmp_rJ3 += d1_rJ3[ii];
(avEN) += tmp_rJ3;
}
}
///////////////// end of energy averaging
__threadfence();
__syncthreads();
if(i > ifinal) break;
}
__syncthreads();
if(p0+q0+s0 == 0)
{
*par.avEN = avEN;
par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3;
par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3;
}
}
__global__ void
__launch_bounds__ (512, 2)
MotionEquationMultiplierMultiModes(PAR par, double Lstop, int Nharm, int Na, double2 B)//Fixed Structure
{
unsigned int p0 = threadIdx.x;
unsigned int q0 = threadIdx.y;
unsigned int s0 = threadIdx.z;
unsigned int Np = blockDim.x;
unsigned int Nq = blockDim.y;
unsigned int Ns = blockDim.z;
unsigned int q_init = Nq*blockIdx.x + q0;
unsigned int s_init = Ns*blockIdx.y + s0;
unsigned int v_init = blockIdx.z;
unsigned int Nq_max = Nq*gridDim.x;
unsigned int Ns_max = Ns*gridDim.y;
// unsigned int Nv_max = gridDim.z;
int warpsize = Np*Nq*Ns;
int log2warpsize = round(log2((double)warpsize));
double la, lb, ld, h, k1, voltage, g1, g3;
double rA1, iA1;
__shared__ double avEN, int_rJ3, int_iJ3, int_rJ3_1, int_iJ3_1;
int N;
double dz;
N = par.Nz;
la = par.la1; lb = par.lb;
ld = par.ld;
h = par.h; k1 = par.k1;
g1 = par.g1; g3 = par.g3;
voltage = par.voltage;
double ifnotdestroyed = 1;
double *rJ3 = par.rJ3;
double *iJ3 = par.iJ3;
double2 *Amps = (double2 *)par.Amps;
// double *int_rJ3 = par.int_rJ3;
// double *int_iJ3 = par.int_iJ3;
dz = par.L/(double)N;
double z;
int ifinal = floor(Lstop/dz);
double Q, Qk1, Qk2, Qk3, Qk4;
double W, Wk1, Wk2, Wk3, Wk4;
double fB;
double R_cyclotron, R_center, kappa_cyclotron, phase_cyclotron, initial_angle;
double wall = par.wall, r;
R_center = 0.5*wall + wall*((double)q_init-0.5*(double)Nq_max)/(double)(Nq_max);
initial_angle = 0;//(0.0810194 - 2.05972*R_center + 28.0433*R_center*R_center);
R_cyclotron = 0;//0.568*initial_angle + 0.035156*((double)v_init)/double(Nv_max);
kappa_cyclotron = 1.758;
phase_cyclotron = 2.*dm_Pi*(double)s_init/(double)Ns_max;
double en0 = 1. + voltage/511.;
en0 -= 0.5*initial_angle*initial_angle*(en0*en0 - 1)*en0;
double beta0 = sqrt(en0*en0 - 1)/en0;
// double Delta = k1*dm_Pi/(la*beta0) ;// \delta f / f = (k_0 \pi /L)/beta_ph
Q = 2.*dm_Pi/double(Np)*double(p0);// + 1./(double)Nq*((double)q0 + (double)s0/(double)Ns));
W = 0;
__shared__ double2 shAmps[NP];
__shared__ double sh_sinQ[NS*NQ*NP];
__shared__ double sh_cosQ[NS*NQ*NP];
double PH, EN, cosPH, sinPH, cosPS, sinPS, rB, iB;
double H = h;//+dh(delta);
if(p0+q_init+s_init + v_init == 0)
{
rJ3[0] = 0;
iJ3[0] = 0;
}
if(p0+q0+s0== 0)
{
par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par.int_rJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
par.int_iJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = 0;
int_rJ3 = 0;
int_iJ3 = 0;
int_rJ3_1 = 0;
int_iJ3_1 = 0;
avEN = 0;
}
if((q0 + s0 == 0)&&(p0 < Na))
{
shAmps[p0] = Amps[p0];
}
__syncthreads();
int i = 0;
for(i = 1; i < N; i++)
{
/////////////////
z = (double)i*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
ifnotdestroyed *= 1;//(r > -wall)? 1. : 0.;
PH = Q;
EN = W + en0;
funcA(dm_Pi/la*z, &rA1, &iA1, shAmps, Na);
if(z > la) {rA1 =0; iA1 = 0;}
rA1 *= exp(-g1*r); iA1 *= exp(-g1*r);
fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0);
rB = 0;//B.x*fB;
iB = 0;//B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk1 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk1 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
// if(s0 + p0 + q0 == 0 && (i == 1)) printf("%g,%g,%g,%g\n", r, g1, Qk1, Wk1);
/////////////////
z = ((double)i+0.5)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
// ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q + 0.5*Qk1;
EN = W + 0.5*Wk1 + en0;
if(z > la) {rA1 =0; iA1 = 0;}
fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0);
rB = 0;//B.x*fB;
iB = 0;//B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk2 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk2 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
PH = Q + 0.5*Qk2;
EN = W + 0.5*Wk2 + en0;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk3 = dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk3 = -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
/////////////////
z = ((double)i+1)*dz;
r = (R_center + R_cyclotron*cos(kappa_cyclotron*z + phase_cyclotron));
// ifnotdestroyed *= (r > -wall)? 1. : 0.;
PH = Q + Qk3;
EN = W + Wk3 + en0;
if(z > la) {rA1 =0; iA1 = 0;}
fB = ( ((z> la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)) *exp(-g3*r):0);
rB = 0;//B.x*fB;
iB = 0;//B.y*fB;
sincos(PH, &sinPH, &cosPH);
sincos(3.*PH, &sinPS, &cosPS);
Qk4= dz*(H - k1*EN/sqrt(EN*EN-1.));
Wk4= -dz*((rA1*cosPH - iA1*sinPH)+(rB*cosPS-iB*sinPS))*ifnotdestroyed;
///////////////
Q += 1./6.*(Qk1+2.*Qk2+2.*Qk3+Qk4);
W += 1./6.*(Wk1+2.*Wk2+2.*Wk3+Wk4);
__syncthreads();
sincos(double(Nharm)*Q, &sinPH, &cosPH);
if(Nharm == 1)
fB = exp(-g1*r); //fB is used as a weight factor when integrating the current along the longitudinal coordinate. WARNING!! fB depends on q0, s0
else
fB = (((z>la+ld)&&(z < la+ld+lb))?sin(dm_Pi/lb*(z-la-ld)):0)*exp(-g3*r);
fB *= ifnotdestroyed;
int xi = p0 + Np*q0 + Np*Nq*s0;
int X = blockIdx.x + gridDim.x*blockIdx.y + gridDim.y*gridDim.x*blockIdx.z;
sh_sinQ[xi] = sinPH*fB;
sh_cosQ[xi] = cosPH*fB;
__syncthreads();
biReduce(sh_sinQ, sh_cosQ, xi, warpsize, log2warpsize);
if(xi == 0)
{
rJ3[X*N+i] = sh_cosQ[0];
iJ3[X*N+i] = -sh_sinQ[0];
int_rJ3 += sh_cosQ[0];
int_iJ3 += -sh_sinQ[0];
}
/////////////////////// energy averaging
if(i == ifinal)
{
sh_sinQ[xi] = W;
__syncthreads();
biReduce(sh_sinQ, sh_cosQ, xi, warpsize, log2warpsize);
if(xi == 0)
{
avEN = sh_sinQ[0];
}
__syncthreads();
}
///////////////// end of energy averaging
__threadfence();
__syncthreads();
if(i > ifinal) break;
}
__syncthreads();
if(p0+q0+s0 == 0)
{
*par.avEN = avEN;
par.int_rJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3;
par.int_iJ3[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3;
par.int_rJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_rJ3_1;
par.int_iJ3_1[gridDim.x*gridDim.y*blockIdx.z + gridDim.x*blockIdx.y+blockIdx.x] = int_iJ3_1;
}
}
std::complex<double> Multiplier::retriveBCurr()
{
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
double t_deltaEn[512]; double t_deltaEn2[512];
double reJ = 0, imJ = 0;
// printf("memcpy: %i\t", cudaMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), cudaMemcpyDeviceToHost));
// printf("memcpy: %i\n", cudaMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), cudaMemcpyDeviceToHost));
cudaMemcpy((void *) t_deltaEn, d_int_rJ3, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
cudaMemcpy((void *) t_deltaEn2, d_int_iJ3, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
for(int i = 0; i < GQ*GS*GV; i++){
reJ += t_deltaEn[i]; imJ += t_deltaEn2[i];
}
double coeff = Lsolver/double(Nz*Np*Nq*Ns*Nv);
// printf("re = %g, im = %g\n", reJ*coeff, imJ*coeff);
std::complex<double> res = std::complex<double> (reJ*coeff, imJ*coeff);
return res;
}
void Multiplier::retriveBCurr(std::complex<double> *J1, std::complex<double> *J2)
{
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
double t_Jre[512]; double t_Jim[512];
double t_J2re[512]; double t_J2im[512];
double reJ = 0, imJ = 0;
double re2J = 0, im2J = 0;
// printf("memcpy: %i\t", cudaMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), cudaMemcpyDeviceToHost));
// printf("memcpy: %i\n", cudaMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), cudaMemcpyDeviceToHost));
cudaMemcpy((void *) t_Jre, d_int_rJ3, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
cudaMemcpy((void *) t_Jim, d_int_iJ3, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
cudaMemcpy((void *) t_J2re, d_int_rJ3_1, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
cudaMemcpy((void *) t_J2im, d_int_iJ3_1, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
for(int i = 0; i < GQ*GS*GV; i++){
reJ += t_Jre[i]; imJ += t_Jim[i];
re2J += t_J2re[i]; im2J += t_J2im[i];
}
double coeff = Lsolver/double(Nz*Np*Nq*Ns*Nv);
// printf("re = %g, im = %g\n", reJ*coeff, imJ*coeff);
std::complex<double> res1 = std::complex<double> (reJ*coeff, imJ*coeff);
std::complex<double> res2 = std::complex<double> (re2J*coeff, im2J*coeff);
*J1 = res1; *J2 = res2;
// printf("J1 = %g, %g\tJ2 = %g, %g\n", *J1, *J2);
}
double Multiplier::retriveDeltaEnergy()
{
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
double t_deltaEn[512];
double t_wmax[512];
double t_wmin[512];
double averagedEn = 0, wmax = -99999, wmin = 99999;
cudaMemcpy( t_deltaEn, d_avEN, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
cudaMemcpy( t_wmax, d_Wmax, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
cudaMemcpy( t_wmin, d_Wmin, sizeof(double)*GQ*GS*GV, cudaMemcpyDeviceToHost);
for(int i = 0; i < GQ*GS*GV; i++)
{
wmax =(wmax > t_wmax[i]) ? wmax : t_wmax[i];
wmin =(wmin < t_wmin[i]) ? wmin : t_wmin[i];
averagedEn += t_deltaEn[i];
// printf("%g\n", t_deltaEn[i]/double(NP*NQ*NS));
}
double coeff = 1./double(Np*Nq*Ns*Nv);
// printf("deltaW + = %g \t deltaW - = %g\n", wmax*511000., wmin*511000.);
return averagedEn*coeff;
}
bool Device::initSolver(int nz, double lsolver, double groupSpeedCoeff, char *_solverName)
{
Nz = nz;
Lsolver = lsolver;
Lmax = lsolver;
solverName = _solverName;
Nmax = nz;
MultiplierGroupSpeedCoefficient = groupSpeedCoeff;
printf("The %s solver is intialized\n", solverName);
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
// printf(" Nq %i, Ns %i, Nv %i \t GQ %i, GS %i, GV %i \n",Nq, Ns, Nv, GQ, GS, GV);
printf("Nz, Lsolver, grSpeed, %i, %g, %g\n", Nz, Lsolver,MultiplierGroupSpeedCoefficient);
gpuErrChk(cudaMalloc((void**)&d_rJ3, Nz*GQ*GS*GV*sizeof(double)));
gpuErrChk(cudaMalloc((void**)&d_iJ3, Nz*GQ*GS*GV*sizeof(double)));
gpuErrChk(cudaMalloc((void**)&d_Nz, sizeof(int)));
gpuErrChk(cudaMalloc((void**)&d_Lsolver, sizeof(double)));
gpuErrChk(cudaMalloc((void**)&d_avEN, sizeof(double)*GQ*GS*GV));
gpuErrChk(cudaMalloc((void**)&d_int_rJ3_1, sizeof(double)*GQ*GS*GV));
gpuErrChk(cudaMalloc((void**)&d_int_iJ3_1, sizeof(double)*GQ*GS*GV));
gpuErrChk(cudaMalloc((void**)&d_Amps, sizeof(cplx) * 30));
if(strcmp(solverName,"multiplier_spcharge_2d") != 0)
{
gpuErrChk(cudaMalloc((void**)&d_int_rJ3, sizeof(double)*GQ*GS*GV));
gpuErrChk(cudaMalloc((void**)&d_int_iJ3, sizeof(double)*GQ*GS*GV));
}
gpuErrChk(cudaMalloc((void**)&d_Wmax, sizeof(double)*GQ*GS*GV));
gpuErrChk(cudaMalloc((void**)&d_Wmin, sizeof(double)*GQ*GS*GV));
gpuErrChk(cudaMalloc((void**)&d_par, sizeof(PAR)));
gpuErrChk(cudaMalloc((void**)&grSpeedCoeff, sizeof(double)));
gpuErrChk(cudaMemcpy((void*)d_Nz, &Nz, sizeof(int), cudaMemcpyHostToDevice));
gpuErrChk(cudaMemcpy((void*)&grSpeedCoeff, &MultiplierGroupSpeedCoefficient, sizeof(double), cudaMemcpyHostToDevice)); // TODO Here is a bug
gpuErrChk(cudaMemcpy((void*)d_Lsolver, (void*)&Lsolver, sizeof(double), cudaMemcpyHostToDevice));
return 1;
}
void Device::releaseDeviceMemory()
{
cudaFree((void*)d_Nz);
cudaFree((void*)d_Lsolver);
cudaFree((void*)d_avEN);
cudaFree((void*)d_int_rJ3);
cudaFree((void*)d_int_iJ3);
if(fieldLoaded)
{
cudaFree((void*) d_tAr);
cudaFree((void*) d_tAi);
}
}
double Multiplier::DeltaEnergy(double A)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La = period*double(Nperiods);
par.la = La; par.lb = Lb; par.ld = Ld; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 zero = {0,0};
// cudaMemcpy( d_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy( d_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(d_int_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(d_int_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy( d_avEN, &dzero, sizeof(double), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(NP, NQ, NS);
cudaMemcpy(d_par, &par, sizeof(PAR), cudaMemcpyHostToDevice);
MotionEquationMultiplier<<<dim3((size_t) Nq/NQ,(size_t) Ns/NS,(size_t) Nv), threadsPerBlock>>>(d_par, La, 1, A, zero);
/* double *debRe = new double [Nz];
double *debIm = new double [Nz];
cudaError copy1 = cudaMemcpy((void*) debRe, (void *)dm_rJq, sizeof(double)*Nz, cudaMemcpyDeviceToHost);
printf("copy1 = %i \n", copy1);
cudaError_t copy2 = cudaMemcpy((void*) debIm, (void *)dm_iJq, sizeof(double)*Nz, cudaMemcpyDeviceToHost);
printf("copy2 = %i \n", copy2);
*/
//printf("memcpy: %i \n", cudaMemcpy((void*) &t_deltaEn, d_avEN, sizeof(double), cudaMemcpyDeviceToHost));
//printf("Energy delta = %g \n", t_deltaEn/double(NP*NQ*NS));
double res = retriveDeltaEnergy();
// printf("Retrieve returned: %g \n", res);
return res;
// delete[] debRe; delete[] debIm;
}
std::complex<double> Multiplier::CurrentB(double reB, double imB, double A)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La = period*double(Nperiods);
par.la = La; par.lb = Lb; par.ld = Ld; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
// printf("CurrentB: %g, %g, %g \n", La, Ld, Lb);
cudaMemset(d_rJ3, 0, sizeof(double)*Nz);
cudaMemset(d_iJ3, 0, sizeof(double)*Nz);
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 B; B.x = reB; B.y = imB;
// printf("\n B loop: %g\n", La+Ld+Lb );
// printf("\n Threads: %i, %i, %i\n", threadsPerBlock.x, threadsPerBlock.y, threadsPerBlock.z );
dim3 numblocks(Nq/NQ, Ns/NS, Nv);
dim3 threadsPerBlock(NP, NQ, NS);
// cudaMemcpy( d_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy( d_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(d_int_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(d_int_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy( d_avEN, &dzero, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_par, &par, sizeof(PAR), cudaMemcpyHostToDevice);
MotionEquationMultiplier << <numblocks, threadsPerBlock >> >(d_par, La + Ld + Lb, 3, A, B);
double *jr = new double [Nz];
double *ji = new double [Nz];
cudaMemcpy(jr, d_rJ3, sizeof(double)*Nz, cudaMemcpyDeviceToHost);
cudaMemcpy(ji, d_iJ3, sizeof(double)*Nz, cudaMemcpyDeviceToHost);
FILE *resamp_ar = fopen("F:\\Piotr\\bwo_Data\\mdebug_jr.csv", "w");
FILE *resamp_ai = fopen("F:\\Piotr\\bwo_Data\\mdebug_ji.csv", "w");
for(int j = 0; j < Nz; j++)
{
fprintf(resamp_ar, "%i,%g\n", j, jr[j]);
fprintf(resamp_ai, "%i,%g\n", j, ji[j]);
}
fclose(resamp_ar);
fclose(resamp_ai);
delete []jr;
delete []ji;
return retriveBCurr();
}
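/*
 * Mode note (an assumption read off the call sites in this file, not a
 * documented contract): the third kernel argument appears to select the
 * solver stage -- 1 integrates over the first cavity only (length La, see
 * DeltaEnergy/CurrentA), while 3 integrates through cavity, drift and
 * buncher (La + Ld + Lb) and accumulates the third-harmonic current J3.
 */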
std::complex<double> MultiplierThreeCavity::CurrentB2(double reB, double imB, double A, cplx A2)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La1 = period*(double)Nperiods;
// printf("CurrentB2: %g, %g, %g, %g, %g \n", La1, Ld1, La2, Ld2, Lb);
par.la1 = La1; par.lb = Lb; par.ld1 = Ld1; par.k1 = k1; par.h = h; par.voltage = voltage;
par.la2 = La2; par.ld2 = Ld2;
par.Nz = Nz; par.L = Lsolver;
par.wall = wall; par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 B; B.x = reB; B.y = imB;
double2 Astat2 ={A2.real(), A2.imag()};
dim3 numblocks(Nq/NQ, Ns/NS, Nv);
dim3 threadsPerBlock(NP, NQ, NS);
MotionEquationMultiplierDoubleScheme << <numblocks, threadsPerBlock >> >(par, La1 + Ld1 + La2 + Ld2 + Lb, 3, A, Astat2, B);
return retriveBCurr();
}
std::complex<double> Multiplier::CurrentA(double reA, double imA)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La = period*double(Nperiods);
par.la = La; par.lb = 1.; par.ld = 1.; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 zero = {0,0};
dim3 threadsPerBlock(NP, NQ, NS);
dim3 numblocks(Nq / NQ, Ns / NS, Nv);
double A; A = sqrt(reA*reA + imA*imA);
// printf("\n B loop: %g\n", La+Ld+Lb );
// printf("\n Threads: %i, %i, %i\n", threadsPerBlock.x, threadsPerBlock.y, threadsPerBlock.z );
// cudaMemcpy( d_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy( d_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(d_int_rJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy(d_int_iJ3, &dzero, sizeof(double), cudaMemcpyHostToDevice);
// cudaMemcpy( d_avEN, &dzero, sizeof(double), cudaMemcpyHostToDevice);
gpuErrChk(cudaMemcpy(d_par, &par, sizeof(PAR), cudaMemcpyHostToDevice));
MotionEquationMultiplier << <numblocks, threadsPerBlock >> >(d_par, La, 1, A, zero);
return retriveBCurr()*exp(I*arg(reA + I*imA));
}
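//Phase handling above: the kernel is launched with the real amplitude
//|A| = sqrt(reA^2 + imA^2), and retriveBCurr() is multiplied by exp(I*arg(A))
//to rotate the computed current back to the phase of the complex input,
//i.e. J(A) = J(|A|) * e^{i arg A}. This is valid only if the equations depend
//on A through its modulus up to a global phase (an assumption of this code).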
std::complex<double> MultiplierThreeCavity::CurrentA2(double A1, double reA, double imA)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La1 = period*double(Nperiods);
par.la1 = La1; par.la2 = La2; par.ld1 = Ld1;
par.lb = 1.; par.ld = 1.; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.Wmax = d_Wmax; par.Wmin = d_Wmin;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
double2 zero = {0,0};
double2 A = {reA, imA};
dim3 threadsPerBlock(NP, NQ, NS);
dim3 numblocks(Nq/NQ, Ns/NS, Nv);
MotionEquationMultiplierDoubleScheme <<< numblocks, threadsPerBlock >>>(par, La1 + La2 + Ld1, 1, A1, A, zero);
return retriveBCurr();
}
void MultiplierMultiModes::CurrentAMultiModes(std::complex<double> *Amps, std::complex<double> * currs, double *buffRe, double *buffIm, int Na, cplx *J1, cplx *J2)
{
PAR par;
double d = period;
double h = 2.*Pi/d;
double La = period*double(Nperiods);
int Nstop = La/dz;
par.la1 = La; par.ld = Ld;
par.lb = 1.; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nz; par.L = Lsolver; par.wall = wall;
par.g1 = g1; par.g3 = g3;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.avEN = d_avEN;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
par.int_rJ3_1 = d_int_rJ3_1;
par.int_iJ3_1 = d_int_iJ3_1;
double2 zero = {0,0};
dim3 threadsPerBlock(NP, NQ, NS);
dim3 numblocks(Nq/NQ, Ns/NS, Nv);
par.Amps = (double2*) d_Amps;
	gpuErrChk(cudaMemcpy(d_Amps, (void*) Amps, sizeof(double2)*Na, cudaMemcpyHostToDevice)); //check the copy like the other transfers; the old return code was never inspected
MotionEquationMultiplierMultiModes <<< numblocks, threadsPerBlock >>>(par, La, 1, Na, zero);
gpuErrChk(cudaPeekAtLastError());
retriveACurrComplex((std::complex<double>*)Amps, currs, buffRe, buffIm, Namm, Nstop);
}
void MultiplierMultiModes::retriveACurrComplex(std::complex<double> *Amps, std::complex<double> *currs, double *currsBuffRe, double *currsBuffIm, int Na, int Nstop)
{
int GQ = Nq / NQ; int GS = Ns / NS; int GV = Nv;
double reJ = 0, imJ = 0;
double rF, iF, z;
double La = period*double(Nperiods);
std::complex<double> J;
// printf("memcpy: %i\t", cudaMemcpy((void *)&t_deltaEn, d_int_rJ3, sizeof(double), cudaMemcpyDeviceToHost));
// printf("memcpy: %i\n", cudaMemcpy((void *)&t_deltaEn2, d_int_iJ3, sizeof(double), cudaMemcpyDeviceToHost));
gpuErrChk(cudaMemcpy((void *)currsBuffRe, d_rJ3, sizeof(double)*GQ*GS*GV*Nmax, cudaMemcpyDeviceToHost));
gpuErrChk(cudaMemcpy((void *)currsBuffIm, d_iJ3, sizeof(double)*GQ*GS*GV*Nmax, cudaMemcpyDeviceToHost));
for (int a = 0; a < Na; a++)
{
currs[a] = 0;
}
// FILE* debugfile = fopen("F:\\Piotr\\CalcData\\mm_orotron_Data\\debug.txt", "w");
for (int j = 0; j < Nstop; j++)
{
reJ = 0; imJ = 0;
for (int i = 0; i < GQ*GS*GV; i++)
{
reJ += currsBuffRe[i*Nmax + j]; imJ += currsBuffIm[i*Nmax + j];
}
for (int a = 0; a < Na; a++)
{
z = (double)j * dz;
sincos(Pi / La*z*double(a - Na / 2), &iF, &rF);
J = cplx(reJ, imJ)*cplx(rF, -iF);
currs[a] += (J);
// if(a == 1) fprintf(debugfile, "%g,%g,%g,%g,%g\n",z, real(J)/double(Np*Nq*Ns*Nv), imag(J)/double(Np*Nq*Ns*Nv), abs(J)/double(Np*Nq*Ns*Nv), arg(J) );
}
}
double coeff = Lsolver / double(Nz*Np*Nq*Ns*Nv);
for (int a = 0; a < Na; a++) currs[a] *= coeff;
// fclose(debugfile);
}
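/*
 * The accumulation loop above is a discrete projection of the current J(z)
 * onto the mode basis of the open cavity: with zeta_a = pi*(a - Na/2)/La,
 *
 *   currs[a] ~ (L / (Nz*Np*Nq*Ns*Nv)) * sum_j J(z_j) * e^{-i zeta_a z_j},
 *
 * where sincos() supplies sin and cos of zeta_a*z_j in one call. The Nstop
 * cutoff restricts the sum to the interaction region z < La.
 */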
//////////////////////////////////
ParamsM Device::setPar()
{
ParamsM par;
int GQ = Nq/NQ; int GS = Ns/NS; int GV = Nv;
int gridsize = GQ*GS*GV;
double La = Nperiods*period;
double h = 2.*Pi/period;
par.la = La; par.k1 = k1; par.h = h; par.voltage = voltage;
par.Nz = Nmax; par.L = Lmax; par.wall = wall;
par.g1 = g1; par.Ngrid = gridsize;
par.ar0 = d_ar0; par.ai0 = d_ai0;
par.rJ3 = d_rJ3; par.iJ3 = d_iJ3;
par.delta = 0;
par.Q0 = d_Q0; par.W0 = d_W0;
par.rAk = d_rAk; par.iAk = d_iAk;
par.rAq1k = d_rAq1k; par.iAq1k = d_iAq1k;
par.Qk = d_Qk; par.Wk = d_Wk;
par.ar0_t = d_ar0_t; par.ai0_t = d_ai0_t;
par.int_rQ1 = d_int_rQ1;
par.int_iQ1 = d_int_iQ1;
par.ifnotdestroyed = d_ifnotdestroyed;
par.g3 = g3;
par.rAq1 =d_rAq1;
par.iAq1 =d_iAq1;
par.radii = d_radii;
par.int_rJ3 = d_int_rJ3;
par.int_iJ3 = d_int_iJ3;
par.int_rJ3_1 = d_int_rJ3_1;
par.int_iJ3_1 = d_int_iJ3_1;
par.avEN = d_avEN;
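//Survival flags: one int per macro-particle, initialised to 1 ("alive") below;
//trajectories that leave the channel are presumably zeroed on the device side
//(assumption: the kernel that clears ifnotdestroyed is not in this file).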
int *mass = new int [Np*Nq*Ns*Nv];
for(int a = 0; a < Np*Nq*Ns*Nv; a++) mass[a] = 1;
gpuErrChk(cudaMemcpy(d_ifnotdestroyed, mass, sizeof(int)*Np*Nq*Ns*Nv, cudaMemcpyHostToDevice));
delete [] mass;
return par;
} |
0a58180450cdafbf5a222dff1b2c0382af754a10.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "rocblas.h"
#include <chrono>
#include <random>
#include </root/cuda-workspace/knnGPU/src/deviceheader.h>
using namespace std;
using namespace cv;
// C(m,n) = A(m,k) * B(k,n)
hipStream_t stream[900];
//Variables for each map/featuremaps
struct Map {
float **map;
float *d_map;
float **weights;
float *d_weights;
float **target;
float *d_target;
float **delta;
float *d_delta;
float **deltabuffer;
float *d_deltabuffer;
float **deltazeropadded;
float *d_deltazeropadded;
};
int richtig;
int falsch;
class Layer {
public:
int type;
int amount;
int height;
int width;
int wheight; //WeightsHeight
int wwidth; //WeightsWidth
float error = 0.0;
vector<Map> Maps;
Layer(int t, int a, int h, int w, int wh, int ww, float var) {
unsigned seed = time(NULL);
std::default_random_engine generator(seed);
type = t;
amount = a;
height = h;
width = w;
wheight = wh;
wwidth = ww;
//Create as many internal Layers as needed
for (int c = 0; c < amount; c++) {
Maps.push_back(Map());
//Create Map
Maps[c].map = new float*[height];
for (int i = 0; i < height; i++) {
Maps[c].map[i] = new float[width];
}
//Create Weights
Maps[c].weights = new float*[wheight];
for (int i = 0; i < wheight; i++) {
Maps[c].weights[i] = new float[wwidth];
}
//Initial Weights
for (int i = 0; i < wheight; i++) {
for (int e = 0; e < wwidth; e++) {
std::normal_distribution<float> distribution(0.0,
sqrt(var));
Maps[c].weights[i][e] = distribution(generator);
}
}
//Create Target
if (type == 2) {
Maps[c].target = new float*[height];
for (int i = 0; i < height; i++) {
Maps[c].target[i] = new float[width];
}
//Allocate Space for Target
hipMalloc((void**) &Maps[c].d_target, height * width * 4);
}
//Create delta
Maps[c].delta = new float*[h];
for (int i = 0; i < h; i++) {
Maps[c].delta[i] = new float[w];
}
Maps[c].deltabuffer = new float*[wheight];
for (int i = 0; i < wheight; i++) {
Maps[c].deltabuffer[i] = new float[wwidth];
}
if (type == 3) {
Maps[c].deltazeropadded = new float*[height + 4];
for (int i = 0; i < height + 4; i++) {
Maps[c].deltazeropadded[i] = new float[width + 4];
}
hipMalloc((void**) &Maps[c].d_deltazeropadded,
(height + 4) * (width + 4) * 4);
}
hipMalloc((void**) &Maps[c].d_deltabuffer, wheight * wwidth * 4);
hipMalloc((void**) &Maps[c].d_delta, height * width * 4);
//Allocate Space for Map on GPU
hipMalloc((void**) &Maps[c].d_map, height * width * 4);
//Copy Weights to GPU
hipMalloc((void**) &Maps[c].d_weights, wheight * wwidth * 4);
for (int i = 0; i < wheight; i++) {
hipMemcpy(Maps[c].d_weights + i * wwidth, Maps[c].weights[i],
wwidth * 4, hipMemcpyHostToDevice);
}
}
cout << "Type: " << type << endl;
cout << "Maps: " << amount << endl;
cout << "Height x Width: " << height << "x" << width << endl;
cout << "Weightsheight x Weightswidth: " << wheight << "x" << wwidth
<< endl;
cout << "Initialized" << endl << endl << endl;
}
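	/*
	 * Weight initialisation used above: w ~ N(0, sqrt(var)) with
	 * var = 1/fan_in computed in main() from the topology table, a
	 * Xavier-style scheme. std::normal_distribution takes the standard
	 * deviation, so passing sqrt(var) makes the variance of each weight var.
	 */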
virtual void loadData(int a, int b, Layer*& network) = 0;
virtual void showLayer() = 0;
void feedForward(Layer*& network) {
if (network->type == 1 || network->type == 2) {
for (int t = 0; t < network->amount; t++) {
hipLaunchKernelGGL(( resetmap), dim3(network->height), dim3(network->width), 0, 0,
network->Maps[t].d_map);
}
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
hipLaunchKernelGGL(( fcForward), dim3(wheight), dim3(wwidth), 0,
stream[network->amount * t + c], Maps[c].d_map,
Maps[c].d_weights, network->Maps[t].d_map, wheight,
wwidth);
}
}
for (int t = 0; t < network->amount; t++) {
hipLaunchKernelGGL(( sigmoid), dim3(network->height), dim3(network->width), 0, 0,
network->Maps[t].d_map);
}
}
if (network->type == 3) {
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
hipLaunchKernelGGL(( cvForward), dim3(network->height), dim3(network->width), 0,
stream[network->amount * c + t], Maps[c].d_map,
Maps[c].d_weights, network->Maps[t].d_map, width,
wwidth);
}
}
for (int t = 0; t < network->amount; t++) {
hipLaunchKernelGGL(( sigmoid), dim3(network->height), dim3(network->width), 0, 0,
network->Maps[t].d_map);
}
}
if (network->type == 4) {
for (int c = 0; c < amount; c++) {
hipLaunchKernelGGL(( maxForward), dim3(network->height), dim3(network->width), 0, 0, Maps[c].d_map,
network->Maps[c].d_map, width);
}
}
if (network->type == 2) {
for (int t = 0; t < network->amount; t++) {
hipLaunchKernelGGL(( outputdeltaGPU), dim3(network->height), dim3(network->width), 0, 0,
network->Maps[t].d_delta, network->Maps[t].d_map,
network->Maps[t].d_target);
}
}
}
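	/*
	 * Forward pass sketch (per the launches above): for fully connected
	 * targets each output map t accumulates sum_c W_{t,c} x_c before the
	 * sigmoid; for convolution targets each output pixel is a 5x5 valid
	 * correlation of every input map. Streams interleave the per-map
	 * kernels; correctness relies on fcForward/cvForward using atomic or
	 * disjoint writes when several streams add into the same output map
	 * (an assumption -- those kernels live in deviceheader.h).
	 */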
void backpropagation(Layer*& network) {
if (type == 1 || type == 2) {
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
hipLaunchKernelGGL(( fcBackpropagation), dim3(network->height), dim3(network->width), 0,
stream[network->amount * c + t], Maps[c].d_delta,
network->Maps[t].d_weights,
network->Maps[t].d_delta, network->Maps[t].d_map,
network->width * network->height, width * height);
}
}
}
if (type == 3) {
for (int c = 0; c < amount; c++) {
hipLaunchKernelGGL(( zeropadding), dim3(height + 4), dim3(width + 4), 0, 0,
Maps[c].d_deltazeropadded, Maps[c].d_delta, width,
network->wwidth);
}
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
hipLaunchKernelGGL(( cvBackward), dim3(network->height), dim3(network->width), 0,
stream[network->amount * c + t],
Maps[c].d_deltazeropadded,
network->Maps[t].d_weights,
network->Maps[t].d_delta, network->Maps[t].d_map,
(width + 4), network->wwidth);
}
}
}
if (type == 4) {
for (int t = 0; t < network->amount; t++) {
hipLaunchKernelGGL(( resetmap), dim3(network->height), dim3(network->width), 0, 0,
network->Maps[t].d_delta);
}
for (int t = 0; t < network->amount; t++) {
hipLaunchKernelGGL(( maxBackward), dim3(height), dim3(width), 0, 0, network->Maps[t].d_map,
network->Maps[t].d_delta, Maps[t].d_delta,
network->width);
}
}
}
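	/*
	 * Backward pass for the convolutional layer: the delta map is first
	 * zero-padded by kernel_size - 1 = 4 on each side, so cvBackward can
	 * compute the "full" correlation with the weights -- the standard way
	 * to route gradients back through a valid convolution.
	 */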
void update(Layer*& network) {
if (type == 1 || type == 2) {
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
hipLaunchKernelGGL(( fcUpdate), dim3(network->width * network->height), dim3(width * height), 0, 0,
network->Maps[t].d_weights, Maps[c].d_delta,
network->Maps[t].d_map);
}
}
}
if (type == 3) {
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
hipLaunchKernelGGL(( cvUpdate), dim3(height), dim3(width), 0, stream[network->amount * c + t],
network->Maps[t].d_deltabuffer, Maps[c].d_delta,
network->Maps[t].d_map, network->width,
network->wwidth);
}
}
for (int t = 0; t < network->amount; t++) {
hipLaunchKernelGGL(( cvAdd), dim3(network->wheight), dim3(network->wwidth), 0, 0,
network->Maps[t].d_deltabuffer,
network->Maps[t].d_weights);
}
}
}
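	/*
	 * Update step: fcUpdate/cvUpdate apply plain stochastic gradient
	 * descent, w += -eta * delta * x per sample; the learning rate is
	 * presumably baked into those kernels (assumption: it never appears as
	 * a host-side parameter). cvUpdate writes per-pair gradients into
	 * deltabuffer and cvAdd folds them into the shared 5x5 filters.
	 */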
virtual ~Layer() {
}
};
class Input: public Layer {
public:
Input(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
Mat image = imread(
"/root/cuda-workspace/knnGPU/src/dataset/" + to_string(a) + "/"
+ to_string(b) + ".jpg", 0);
for (int c = 0; c < amount; c++) {
for (int x = 0; x < image.size().height; x++) {
for (int y = 0; y < image.size().width; y++) {
					float val = static_cast<float>(image.at<uchar>(x, y));
					Maps[c].map[x][y] = val / 255.0f; //avoid integer division when normalizing
}
}
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].d_map + i * width, Maps[c].map[i], width * 4,
hipMemcpyHostToDevice);
}
for (int i = 0; i < network->width; i++) {
if (i == a) {
network->Maps[c].target[0][i] = 1;
} else {
network->Maps[c].target[0][i] = 0;
}
}
for (int i = 0; i < network->height; i++) {
hipMemcpy(network->Maps[c].d_target + i * network->width,
network->Maps[c].target[i], network->width * 4,
hipMemcpyHostToDevice);
}
}
}
void showLayer() {
for (int c = 0; c < amount; c++) {
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4,
hipMemcpyDeviceToHost);
}
unsigned char res[28][28];
for (int i = 0; i < 28; i++) {
for (int e = 0; e < 28; e++) {
					res[i][e] = (unsigned char)(Maps[c].map[i][e] * 255.0f); //rescale [0,1] activations for display
}
}
Mat src = Mat(28, 28, CV_8UC1, res);
imshow("Original", src);
//waitKey(0);
}
}
};
class Fullyconnected: public Layer {
public:
Fullyconnected(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
//NOT USED IN THIS LAYER
}
void showLayer() {
/*for (int c = 0; c < amount; c++) {
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].delta[i], Maps[c].d_delta + i * width, width * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < width; i++) {
//cout << Maps[c].map[0][i] << " " << Maps[c].delta[0][i] << endl;
}
cout << endl;
}*/
}
};
class Convolution: public Layer {
public:
Convolution(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
//NOT USED IN THIS LAYER
}
void showLayer() {
/*for (int c = 0; c < amount; c++) {
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].delta[i], Maps[c].d_delta + i * width, width * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < height + 4; i++) {
hipMemcpy(Maps[c].deltazeropadded[i], Maps[c].d_deltazeropadded + i * (width + 4), (width + 4) * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < width; i++) {
for (int e = 0; e < height; e++) {
cout << Maps[c].map[e][i] << " ";
}
cout << endl;
}
cout << endl;
}*/
}
};
class Maxpooling: public Layer {
public:
Maxpooling(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
//NOT USED IN THIS LAYER
}
void showLayer() {
/*for (int c = 0; c < amount; c++) {
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].delta[i], Maps[c].d_delta + i * width, width * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < width; i++) {
for (int e = 0; e < height; e++) {
cout << Maps[c].map[e][i] << " ";
}
cout << endl;
}
cout << endl;
}*/
}
};
class Output: public Layer {
public:
Output(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
//NOT USED IN THIS LAYER
}
void showLayer() {
for (int c = 0; c < amount; c++) {
error = 0.0;
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4,
hipMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].target[i], Maps[c].d_target + i * width,
width * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
hipMemcpy(Maps[c].delta[i], Maps[c].d_delta + i * width,
width * 4, hipMemcpyDeviceToHost);
}
for (int i = 0; i < width; i++) {
error += 0.5 * (Maps[c].target[0][i] - Maps[c].map[0][i])
* (Maps[c].target[0][i] - Maps[c].map[0][i]);
}
for (int i = 0; i < width; i++) {
cout << Maps[c].map[0][i] << " " << Maps[c].target[0][i]
<< " " << Maps[c].delta[0][i] << endl;
}
int result = 0;
			for (int i = 1; i < 10; i++) {
				if (Maps[0].map[0][i] > Maps[0].map[0][result]) {
					result = i;
				}
			}
			if(Maps[c].target[0][result] == 1){
				cout << "Correct" << endl;
				richtig++;
			}else{
				cout << "Wrong" << endl;
				falsch++;
			}
cout << endl;
cout << error << endl << endl;
}
}
};
int imgHeight = 28;
int imgWidth = 28;
int samples = 300;
int outs = 10;
struct data {
float **red;
float *d_red;
float **green;
float *d_green;
float **blue;
float *d_blue;
};
vector<vector<data> > dataset(samples, vector<data>(outs));
void ldata(int i, int e) {
dataset[i][e].red = new float*[imgHeight];
for (int f = 0; f < imgHeight; f++) {
dataset[i][e].red[f] = new float[imgWidth];
}
dataset[i][e].green = new float*[imgHeight];
for (int f = 0; f < imgHeight; f++) {
dataset[i][e].green[f] = new float[imgWidth];
}
dataset[i][e].blue = new float*[imgHeight];
for (int f = 0; f < imgHeight; f++) {
dataset[i][e].blue[f] = new float[imgWidth];
}
//////////////////////////////////////
Mat image = imread(
"/root/cuda-workspace/knnGPU/src/dataset/" + to_string(e) + "/"
+ to_string(i) + ".jpg", 1);
for (int x = 0; x < image.size().height; x++) {
for (int y = 0; y < image.size().width; y++) {
//load data into array and normalize values
			//at() takes (row, col); use 255.0f so integer division does not truncate to 0/1
			dataset[i][e].blue[x][y] = image.at<cv::Vec3b>(x, y)[0] / 255.0f;
			dataset[i][e].green[x][y] = image.at<cv::Vec3b>(x, y)[1] / 255.0f;
			dataset[i][e].red[x][y] = image.at<cv::Vec3b>(x, y)[2] / 255.0f;
}
}
//Allocate Device Memory on device (GPU)
hipMalloc((void**) &dataset[i][e].d_red, imgWidth * imgHeight * 4);
hipMalloc((void**) &dataset[i][e].d_green, imgWidth * imgHeight * 4);
hipMalloc((void**) &dataset[i][e].d_blue, imgWidth * imgHeight * 4);
}
void cdata(int i, int e) {
for (int f = 0; f < imgHeight; f++) {
//copy data into device (GPU)
hipMemcpy(dataset[i][e].d_red + f * imgWidth, dataset[i][e].red[f],
imgWidth * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dataset[i][e].d_green + f * imgWidth, dataset[i][e].green[f],
imgWidth * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dataset[i][e].d_blue + f * imgWidth, dataset[i][e].blue[f],
imgWidth * sizeof(float), hipMemcpyHostToDevice);
}
}
vector<Layer*> network;
int topology[5][6] = { { 0, 3, imgHeight, imgWidth, 5, 5 }, { 3, 5, 24, 24, 0,
0 }, { 4, 5, 12, 12, 12 * 12, 100 }, { 1, 1, 1, 100, 100, outs }, { 2,
1, 1, outs, 0, 0 } };
int length = (sizeof(topology) / sizeof(topology[0]));
//Interface
Mat img = Mat(1080, 1920, CV_8UC3, Scalar(0, 0, 0));
bool down = false;
Mat cropped;
Mat gray;
Mat digit = Mat(28, 28, CV_8UC3, Scalar(0, 0, 0));
void CallBackFunc(int event, int x, int y, int flags, void* userdata) {
if (event == EVENT_LBUTTONDOWN) {
down = true;
}
if (event == EVENT_LBUTTONUP) {
down = false;
}
if (event == EVENT_MOUSEMOVE) {
if (down == true) {
circle(img, Point(x, y), 20, Scalar(255, 255, 255), CV_FILLED, 8,
0);
}
imshow("Drawing Window", img);
}
if (event == EVENT_LBUTTONUP) {
cvtColor(img, gray, CV_BGR2GRAY);
//threshold(gray, gray, 0, 55, THRESH_BINARY_INV);
		double largest_area = 0; //contourArea returns a double; avoid truncating it
int largest_contour_index = 0;
Rect bounding_rect;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(gray, contours, hierarchy, CV_RETR_CCOMP,
CV_CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
double a = contourArea(contours[i], false);
if (a > largest_area) {
largest_area = a;
cout << i << " area " << a << endl;
largest_contour_index = i;
bounding_rect = boundingRect(contours[i]);
}
}
		img(bounding_rect).copyTo(cropped); //the digit is cut out and copied into "cropped"
resize(cropped, digit, cvSize(28, 28));
imshow("Digit", digit);
imwrite("/root/cuda-workspace/knnGPU/src/dataset/999/0.jpg", digit);
ldata(0, 999); //load cropped digit
cdata(0, 999); //copy cropped digit into device
//set digit as Input (3 channels)
hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[0][999].d_red,
network[0]->Maps[0].d_map);
hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[0][999].d_green,
network[0]->Maps[1].d_map);
		hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[0][999].d_blue, //fix: the third input map gets the blue plane, not green twice
network[0]->Maps[2].d_map);
for (int i = 0; i < length - 1; i++) {
network[i]->feedForward(network[i + 1]);
}
for (int i = 0; i < 1; i++) {
hipMemcpy(network[length - 1]->Maps[0].map[i],
network[length - 1]->Maps[0].d_map + i * outs, outs * 4,
hipMemcpyDeviceToHost);
}
for (int i = 0; i < outs; i++) {
cout << network[length - 1]->Maps[0].map[0][i] << endl;
}
cout << endl << endl;
int result = 0;
		for (int i = 1; i < outs; i++) {
			if (network[length - 1]->Maps[0].map[0][i]
					> network[length - 1]->Maps[0].map[0][result]) {
				result = i;
			}
		}
rectangle(img, Point(10, 10), Point(100, 40), Scalar(0, 0, 0), -1, 8);
putText(img, "Zahl: " + to_string(result), cvPoint(30, 30),
FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
}
if (event == EVENT_RBUTTONDOWN) {
img = Scalar(0, 0, 0);
}
}
int main() {
//Create 900 cudastreams
for (int i = 0; i < 900; i++) {
hipStreamCreate(&stream[i]);
}
//load data from dataset into program
for (int i = 0; i < samples; i++) {
for (int e = 0; e < outs; e++) {
ldata(i, e);
}
}
//copy data into device
for (int i = 0; i < samples; i++) {
for (int e = 0; e < outs; e++) {
cdata(i, e);
}
}
//Initialize Network
float var;
for (int i = 0; i < length; i++) {
//Calculate Variation for Xavier Initialization
		var = 1.0; //default also keeps 1.0/var finite when no case below matches
		if (i + 1 < length) { //guard: topology[i + 1] is out of bounds for the last layer
			if (topology[i + 1][0] == 3) {
				var = topology[i][4] * topology[i][5] * topology[i][1];
			}
			if (topology[i + 1][0] == 2 || topology[i + 1][0] == 1) {
				var = topology[i][4] * topology[i][1];
			}
			var = 1.0 / var;
		}
//Create Layerobject in the vector network with parameters from topology
if (topology[i][0] == 0) {
network.push_back(
new Input(topology[i][0], topology[i][1], topology[i][2],
topology[i][3], topology[i][4], topology[i][5],
var));
}
if (topology[i][0] == 1) {
network.push_back(
new Fullyconnected(topology[i][0], topology[i][1],
topology[i][2], topology[i][3], topology[i][4],
topology[i][5], var));
}
if (topology[i][0] == 2) {
network.push_back(
new Output(topology[i][0], topology[i][1], topology[i][2],
topology[i][3], topology[i][4], topology[i][5],
var));
}
if (topology[i][0] == 3) {
network.push_back(
new Convolution(topology[i][0], topology[i][1],
topology[i][2], topology[i][3], topology[i][4],
topology[i][5], var));
}
if (topology[i][0] == 4) {
network.push_back(
new Maxpooling(topology[i][0], topology[i][1],
topology[i][2], topology[i][3], topology[i][4],
topology[i][5], var));
}
}
for (int l = 0; l < 200; l++) {
for (int b = 1; b < 190; b++) {
for (int a = 0; a < outs; a++) {
//Load Input and Target into the network
hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[b][a].d_red,
network[0]->Maps[0].d_map);
hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[b][a].d_green,
network[0]->Maps[1].d_map);
				hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[b][a].d_blue, //fix: the third input map gets the blue plane, not green twice
network[0]->Maps[2].d_map);
hipLaunchKernelGGL(( setTarget), dim3(1), dim3(outs), 0, 0, network[length - 1]->Maps[0].d_target,
a);
hipDeviceSynchronize();
//FeedForward
for (int i = 0; i < length - 1; i++) {
network[i]->feedForward(network[i + 1]);
}
//Backpropagate the error
for (int i = length - 1; i > 0; i--) {
network[i]->backpropagation(network[i - 1]);
}
//Update weights based on bp results
for (int i = 1; i < length; i++) {
network[i]->update(network[i - 1]);
}
//ShowMaps
}
cout << l << endl;
}
}
for (int b = 190; b < 200; b++) {
for (int a = 0; a < outs; a++) {
hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[b][a].d_red,
network[0]->Maps[0].d_map);
hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[b][a].d_green,
network[0]->Maps[1].d_map);
			hipLaunchKernelGGL(( setData), dim3(imgWidth), dim3(imgWidth), 0, 0, dataset[b][a].d_blue, //fix: the third input map gets the blue plane, not green twice
network[0]->Maps[2].d_map);
hipLaunchKernelGGL(( setTarget), dim3(1), dim3(outs), 0, 0, network[length - 1]->Maps[0].d_target, a);
for (int i = 0; i < length - 1; i++) {
network[i]->feedForward(network[i + 1]);
}
for (int i = 0; i < length; i++) {
network[i]->showLayer();
}
}
}
namedWindow("Drawing Window", 1);
imshow("Drawing Window", img);
setMouseCallback("Drawing Window", CallBackFunc, NULL);
waitKey(0);
cout << richtig << endl;
cout << falsch << endl;
}
| 0a58180450cdafbf5a222dff1b2c0382af754a10.cu | #include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "cublas_v2.h"
#include <chrono>
#include <random>
#include </root/cuda-workspace/knnGPU/src/deviceheader.h>
using namespace std;
using namespace cv;
// C(m,n) = A(m,k) * B(k,n)
cudaStream_t stream[900];
//Variables for each map/featuremaps
struct Map {
float **map;
float *d_map;
float **weights;
float *d_weights;
float **target;
float *d_target;
float **delta;
float *d_delta;
float **deltabuffer;
float *d_deltabuffer;
float **deltazeropadded;
float *d_deltazeropadded;
};
int richtig;
int falsch;
class Layer {
public:
int type;
int amount;
int height;
int width;
int wheight; //WeightsHeight
int wwidth; //WeightsWidth
float error = 0.0;
vector<Map> Maps;
Layer(int t, int a, int h, int w, int wh, int ww, float var) {
unsigned seed = time(NULL);
std::default_random_engine generator(seed);
type = t;
amount = a;
height = h;
width = w;
wheight = wh;
wwidth = ww;
//Create as many internal Layers as needed
for (int c = 0; c < amount; c++) {
Maps.push_back(Map());
//Create Map
Maps[c].map = new float*[height];
for (int i = 0; i < height; i++) {
Maps[c].map[i] = new float[width];
}
//Create Weights
Maps[c].weights = new float*[wheight];
for (int i = 0; i < wheight; i++) {
Maps[c].weights[i] = new float[wwidth];
}
//Initial Weights
for (int i = 0; i < wheight; i++) {
for (int e = 0; e < wwidth; e++) {
std::normal_distribution<float> distribution(0.0,
sqrt(var));
Maps[c].weights[i][e] = distribution(generator);
}
}
//Create Target
if (type == 2) {
Maps[c].target = new float*[height];
for (int i = 0; i < height; i++) {
Maps[c].target[i] = new float[width];
}
//Allocate Space for Target
cudaMalloc((void**) &Maps[c].d_target, height * width * 4);
}
//Create delta
Maps[c].delta = new float*[h];
for (int i = 0; i < h; i++) {
Maps[c].delta[i] = new float[w];
}
Maps[c].deltabuffer = new float*[wheight];
for (int i = 0; i < wheight; i++) {
Maps[c].deltabuffer[i] = new float[wwidth];
}
if (type == 3) {
Maps[c].deltazeropadded = new float*[height + 4];
for (int i = 0; i < height + 4; i++) {
Maps[c].deltazeropadded[i] = new float[width + 4];
}
cudaMalloc((void**) &Maps[c].d_deltazeropadded,
(height + 4) * (width + 4) * 4);
}
cudaMalloc((void**) &Maps[c].d_deltabuffer, wheight * wwidth * 4);
cudaMalloc((void**) &Maps[c].d_delta, height * width * 4);
//Allocate Space for Map on GPU
cudaMalloc((void**) &Maps[c].d_map, height * width * 4);
//Copy Weights to GPU
cudaMalloc((void**) &Maps[c].d_weights, wheight * wwidth * 4);
for (int i = 0; i < wheight; i++) {
cudaMemcpy(Maps[c].d_weights + i * wwidth, Maps[c].weights[i],
wwidth * 4, cudaMemcpyHostToDevice);
}
}
cout << "Type: " << type << endl;
cout << "Maps: " << amount << endl;
cout << "Height x Width: " << height << "x" << width << endl;
cout << "Weightsheight x Weightswidth: " << wheight << "x" << wwidth
<< endl;
cout << "Initialized" << endl << endl << endl;
}
virtual void loadData(int a, int b, Layer*& network) = 0;
virtual void showLayer() = 0;
void feedForward(Layer*& network) {
if (network->type == 1 || network->type == 2) {
for (int t = 0; t < network->amount; t++) {
resetmap<<<network->height, network->width>>>(
network->Maps[t].d_map);
}
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
fcForward<<<wheight, wwidth, 0,
stream[network->amount * t + c]>>>(Maps[c].d_map,
Maps[c].d_weights, network->Maps[t].d_map, wheight,
wwidth);
}
}
for (int t = 0; t < network->amount; t++) {
sigmoid<<<network->height, network->width>>>(
network->Maps[t].d_map);
}
}
if (network->type == 3) {
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
cvForward<<<network->height, network->width, 0,
stream[network->amount * c + t]>>>(Maps[c].d_map,
Maps[c].d_weights, network->Maps[t].d_map, width,
wwidth);
}
}
for (int t = 0; t < network->amount; t++) {
sigmoid<<<network->height, network->width>>>(
network->Maps[t].d_map);
}
}
if (network->type == 4) {
for (int c = 0; c < amount; c++) {
maxForward<<<network->height, network->width>>>(Maps[c].d_map,
network->Maps[c].d_map, width);
}
}
if (network->type == 2) {
for (int t = 0; t < network->amount; t++) {
outputdeltaGPU<<<network->height, network->width>>>(
network->Maps[t].d_delta, network->Maps[t].d_map,
network->Maps[t].d_target);
}
}
}
void backpropagation(Layer*& network) {
if (type == 1 || type == 2) {
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
fcBackpropagation<<<network->height, network->width, 0,
stream[network->amount * c + t]>>>(Maps[c].d_delta,
network->Maps[t].d_weights,
network->Maps[t].d_delta, network->Maps[t].d_map,
network->width * network->height, width * height);
}
}
}
if (type == 3) {
for (int c = 0; c < amount; c++) {
zeropadding<<<height + 4, width + 4>>>(
Maps[c].d_deltazeropadded, Maps[c].d_delta, width,
network->wwidth);
}
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
cvBackward<<<network->height, network->width, 0,
stream[network->amount * c + t]>>>(
Maps[c].d_deltazeropadded,
network->Maps[t].d_weights,
network->Maps[t].d_delta, network->Maps[t].d_map,
(width + 4), network->wwidth);
}
}
}
if (type == 4) {
for (int t = 0; t < network->amount; t++) {
resetmap<<<network->height, network->width>>>(
network->Maps[t].d_delta);
}
for (int t = 0; t < network->amount; t++) {
maxBackward<<<height, width>>>(network->Maps[t].d_map,
network->Maps[t].d_delta, Maps[t].d_delta,
network->width);
}
}
}
void update(Layer*& network) {
if (type == 1 || type == 2) {
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
fcUpdate<<<network->width * network->height, width * height>>>(
network->Maps[t].d_weights, Maps[c].d_delta,
network->Maps[t].d_map);
}
}
}
if (type == 3) {
for (int t = 0; t < network->amount; t++) {
for (int c = 0; c < amount; c++) {
cvUpdate<<<height, width, 0, stream[network->amount * c + t]>>>(
network->Maps[t].d_deltabuffer, Maps[c].d_delta,
network->Maps[t].d_map, network->width,
network->wwidth);
}
}
for (int t = 0; t < network->amount; t++) {
cvAdd<<<network->wheight, network->wwidth>>>(
network->Maps[t].d_deltabuffer,
network->Maps[t].d_weights);
}
}
}
virtual ~Layer() {
}
};
class Input: public Layer {
public:
Input(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
Mat image = imread(
"/root/cuda-workspace/knnGPU/src/dataset/" + to_string(a) + "/"
+ to_string(b) + ".jpg", 0);
for (int c = 0; c < amount; c++) {
for (int x = 0; x < image.size().height; x++) {
for (int y = 0; y < image.size().width; y++) {
					float val = static_cast<float>(image.at<uchar>(x, y));
					Maps[c].map[x][y] = val / 255.0f; //avoid integer division when normalizing
}
}
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].d_map + i * width, Maps[c].map[i], width * 4,
cudaMemcpyHostToDevice);
}
for (int i = 0; i < network->width; i++) {
if (i == a) {
network->Maps[c].target[0][i] = 1;
} else {
network->Maps[c].target[0][i] = 0;
}
}
for (int i = 0; i < network->height; i++) {
cudaMemcpy(network->Maps[c].d_target + i * network->width,
network->Maps[c].target[i], network->width * 4,
cudaMemcpyHostToDevice);
}
}
}
void showLayer() {
for (int c = 0; c < amount; c++) {
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4,
cudaMemcpyDeviceToHost);
}
unsigned char res[28][28];
for (int i = 0; i < 28; i++) {
for (int e = 0; e < 28; e++) {
					res[i][e] = (unsigned char)(Maps[c].map[i][e] * 255.0f); //rescale [0,1] activations for display
}
}
Mat src = Mat(28, 28, CV_8UC1, res);
imshow("Original", src);
//waitKey(0);
}
}
};
class Fullyconnected: public Layer {
public:
Fullyconnected(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
//NOT USED IN THIS LAYER
}
void showLayer() {
/*for (int c = 0; c < amount; c++) {
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].delta[i], Maps[c].d_delta + i * width, width * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < width; i++) {
//cout << Maps[c].map[0][i] << " " << Maps[c].delta[0][i] << endl;
}
cout << endl;
}*/
}
};
class Convolution: public Layer {
public:
Convolution(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
//NOT USED IN THIS LAYER
}
void showLayer() {
/*for (int c = 0; c < amount; c++) {
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].delta[i], Maps[c].d_delta + i * width, width * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < height + 4; i++) {
cudaMemcpy(Maps[c].deltazeropadded[i], Maps[c].d_deltazeropadded + i * (width + 4), (width + 4) * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < width; i++) {
for (int e = 0; e < height; e++) {
cout << Maps[c].map[e][i] << " ";
}
cout << endl;
}
cout << endl;
}*/
}
};
class Maxpooling: public Layer {
public:
Maxpooling(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
//NOT USED IN THIS LAYER
}
void showLayer() {
/*for (int c = 0; c < amount; c++) {
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].delta[i], Maps[c].d_delta + i * width, width * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < width; i++) {
for (int e = 0; e < height; e++) {
cout << Maps[c].map[e][i] << " ";
}
cout << endl;
}
cout << endl;
}*/
}
};
class Output: public Layer {
public:
Output(int t, int a, int h, int w, int wh, int ww, float var) :
Layer(t, a, h, w, wh, ww, var) {
}
void loadData(int a, int b, Layer*& network) {
//NOT USED IN THIS LAYER
}
void showLayer() {
for (int c = 0; c < amount; c++) {
error = 0.0;
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].map[i], Maps[c].d_map + i * width, width * 4,
cudaMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].target[i], Maps[c].d_target + i * width,
width * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < height; i++) {
cudaMemcpy(Maps[c].delta[i], Maps[c].d_delta + i * width,
width * 4, cudaMemcpyDeviceToHost);
}
for (int i = 0; i < width; i++) {
error += 0.5 * (Maps[c].target[0][i] - Maps[c].map[0][i])
* (Maps[c].target[0][i] - Maps[c].map[0][i]);
}
for (int i = 0; i < width; i++) {
cout << Maps[c].map[0][i] << " " << Maps[c].target[0][i]
<< " " << Maps[c].delta[0][i] << endl;
}
int result = 0;
			for (int i = 1; i < 10; i++) {
				if (Maps[0].map[0][i] > Maps[0].map[0][result]) {
					result = i;
				}
			}
			if(Maps[c].target[0][result] == 1){
				cout << "Correct" << endl;
				richtig++;
			}else{
				cout << "Wrong" << endl;
				falsch++;
			}
cout << endl;
cout << error << endl << endl;
}
}
};
int imgHeight = 28;
int imgWidth = 28;
int samples = 300;
int outs = 10;
struct data {
float **red;
float *d_red;
float **green;
float *d_green;
float **blue;
float *d_blue;
};
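//Transfer note: the host planes below are ordinary pageable memory. A sketch
//of a pinned-memory variant (hypothetical, not used in this program) that
//would speed up the per-row cudaMemcpy calls in cdata():
//
//	float *h_red;
//	cudaMallocHost((void**) &h_red, imgWidth * imgHeight * sizeof(float));
//	/* ...fill h_red... */
//	cudaMemcpy(d_red, h_red, imgWidth * imgHeight * sizeof(float),
//			cudaMemcpyHostToDevice);
//	cudaFreeHost(h_red);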
vector<vector<data> > dataset(samples, vector<data>(outs));
void ldata(int i, int e) {
dataset[i][e].red = new float*[imgHeight];
for (int f = 0; f < imgHeight; f++) {
dataset[i][e].red[f] = new float[imgWidth];
}
dataset[i][e].green = new float*[imgHeight];
for (int f = 0; f < imgHeight; f++) {
dataset[i][e].green[f] = new float[imgWidth];
}
dataset[i][e].blue = new float*[imgHeight];
for (int f = 0; f < imgHeight; f++) {
dataset[i][e].blue[f] = new float[imgWidth];
}
//////////////////////////////////////
Mat image = imread(
"/root/cuda-workspace/knnGPU/src/dataset/" + to_string(e) + "/"
+ to_string(i) + ".jpg", 1);
for (int x = 0; x < image.size().height; x++) {
for (int y = 0; y < image.size().width; y++) {
//load data into array and normalize values
			//at() takes (row, col); use 255.0f so integer division does not truncate to 0/1
			dataset[i][e].blue[x][y] = image.at<cv::Vec3b>(x, y)[0] / 255.0f;
			dataset[i][e].green[x][y] = image.at<cv::Vec3b>(x, y)[1] / 255.0f;
			dataset[i][e].red[x][y] = image.at<cv::Vec3b>(x, y)[2] / 255.0f;
}
}
//Allocate Device Memory on device (GPU)
cudaMalloc((void**) &dataset[i][e].d_red, imgWidth * imgHeight * 4);
cudaMalloc((void**) &dataset[i][e].d_green, imgWidth * imgHeight * 4);
cudaMalloc((void**) &dataset[i][e].d_blue, imgWidth * imgHeight * 4);
}
void cdata(int i, int e) {
for (int f = 0; f < imgHeight; f++) {
//copy data into device (GPU)
cudaMemcpy(dataset[i][e].d_red + f * imgWidth, dataset[i][e].red[f],
imgWidth * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dataset[i][e].d_green + f * imgWidth, dataset[i][e].green[f],
imgWidth * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dataset[i][e].d_blue + f * imgWidth, dataset[i][e].blue[f],
imgWidth * sizeof(float), cudaMemcpyHostToDevice);
}
}
vector<Layer*> network;
int topology[5][6] = { { 0, 3, imgHeight, imgWidth, 5, 5 }, { 3, 5, 24, 24, 0,
0 }, { 4, 5, 12, 12, 12 * 12, 100 }, { 1, 1, 1, 100, 100, outs }, { 2,
1, 1, outs, 0, 0 } };
int length = (sizeof(topology) / sizeof(topology[0]));
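//Topology row layout (as consumed by the Layer constructor):
//  { type, maps, height, width, weightsHeight, weightsWidth }
//with type 0 = Input, 1 = Fullyconnected, 2 = Output, 3 = Convolution,
//4 = Maxpooling. E.g. row 0 declares 3 input maps of 28x28 carrying 5x5
//filters that feed the 5 convolution maps of 24x24 in row 1.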
//Interface
Mat img = Mat(1080, 1920, CV_8UC3, Scalar(0, 0, 0));
bool down = false;
Mat cropped;
Mat gray;
Mat digit = Mat(28, 28, CV_8UC3, Scalar(0, 0, 0));
void CallBackFunc(int event, int x, int y, int flags, void* userdata) {
if (event == EVENT_LBUTTONDOWN) {
down = true;
}
if (event == EVENT_LBUTTONUP) {
down = false;
}
if (event == EVENT_MOUSEMOVE) {
if (down == true) {
circle(img, Point(x, y), 20, Scalar(255, 255, 255), CV_FILLED, 8,
0);
}
imshow("Drawing Window", img);
}
if (event == EVENT_LBUTTONUP) {
cvtColor(img, gray, CV_BGR2GRAY);
//threshold(gray, gray, 0, 55, THRESH_BINARY_INV);
		double largest_area = 0; //contourArea returns a double; avoid truncating it
int largest_contour_index = 0;
Rect bounding_rect;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(gray, contours, hierarchy, CV_RETR_CCOMP,
CV_CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++) {
double a = contourArea(contours[i], false);
if (a > largest_area) {
largest_area = a;
cout << i << " area " << a << endl;
largest_contour_index = i;
bounding_rect = boundingRect(contours[i]);
}
}
		img(bounding_rect).copyTo(cropped); //the digit is cut out and copied into "cropped"
resize(cropped, digit, cvSize(28, 28));
imshow("Digit", digit);
imwrite("/root/cuda-workspace/knnGPU/src/dataset/999/0.jpg", digit);
ldata(0, 999); //load cropped digit
cdata(0, 999); //copy cropped digit into device
//set digit as Input (3 channels)
setData<<<imgWidth, imgWidth>>>(dataset[0][999].d_red,
network[0]->Maps[0].d_map);
setData<<<imgWidth, imgWidth>>>(dataset[0][999].d_green,
network[0]->Maps[1].d_map);
		setData<<<imgWidth, imgWidth>>>(dataset[0][999].d_blue, //fix: the third input map gets the blue plane, not green twice
network[0]->Maps[2].d_map);
for (int i = 0; i < length - 1; i++) {
network[i]->feedForward(network[i + 1]);
}
for (int i = 0; i < 1; i++) {
cudaMemcpy(network[length - 1]->Maps[0].map[i],
network[length - 1]->Maps[0].d_map + i * outs, outs * 4,
cudaMemcpyDeviceToHost);
}
for (int i = 0; i < outs; i++) {
cout << network[length - 1]->Maps[0].map[0][i] << endl;
}
cout << endl << endl;
int result = 0;
		for (int i = 1; i < outs; i++) {
			if (network[length - 1]->Maps[0].map[0][i]
					> network[length - 1]->Maps[0].map[0][result]) {
				result = i;
			}
		}
rectangle(img, Point(10, 10), Point(100, 40), Scalar(0, 0, 0), -1, 8);
putText(img, "Zahl: " + to_string(result), cvPoint(30, 30),
FONT_HERSHEY_COMPLEX_SMALL, 0.8, cvScalar(0, 0, 255), 1, CV_AA);
}
if (event == EVENT_RBUTTONDOWN) {
img = Scalar(0, 0, 0);
}
}
int main() {
//Create 900 cudastreams
for (int i = 0; i < 900; i++) {
cudaStreamCreate(&stream[i]);
}
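	//900 streams is an upper bound for the per-map kernel pairs that
	//feedForward/backpropagation index as stream[amountA * i + j]; with this
	//topology only a handful of them are actually used, the rest stay idle.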
//load data from dataset into program
for (int i = 0; i < samples; i++) {
for (int e = 0; e < outs; e++) {
ldata(i, e);
}
}
//copy data into device
for (int i = 0; i < samples; i++) {
for (int e = 0; e < outs; e++) {
cdata(i, e);
}
}
//Initialize Network
float var;
for (int i = 0; i < length; i++) {
//Calculate Variation for Xavier Initialization
		var = 1.0; //default also keeps 1.0/var finite when no case below matches
		if (i + 1 < length) { //guard: topology[i + 1] is out of bounds for the last layer
			if (topology[i + 1][0] == 3) {
				var = topology[i][4] * topology[i][5] * topology[i][1];
			}
			if (topology[i + 1][0] == 2 || topology[i + 1][0] == 1) {
				var = topology[i][4] * topology[i][1];
			}
			var = 1.0 / var;
		}
//Create Layerobject in the vector network with parameters from topology
if (topology[i][0] == 0) {
network.push_back(
new Input(topology[i][0], topology[i][1], topology[i][2],
topology[i][3], topology[i][4], topology[i][5],
var));
}
if (topology[i][0] == 1) {
network.push_back(
new Fullyconnected(topology[i][0], topology[i][1],
topology[i][2], topology[i][3], topology[i][4],
topology[i][5], var));
}
if (topology[i][0] == 2) {
network.push_back(
new Output(topology[i][0], topology[i][1], topology[i][2],
topology[i][3], topology[i][4], topology[i][5],
var));
}
if (topology[i][0] == 3) {
network.push_back(
new Convolution(topology[i][0], topology[i][1],
topology[i][2], topology[i][3], topology[i][4],
topology[i][5], var));
}
if (topology[i][0] == 4) {
network.push_back(
new Maxpooling(topology[i][0], topology[i][1],
topology[i][2], topology[i][3], topology[i][4],
topology[i][5], var));
}
}
for (int l = 0; l < 200; l++) {
for (int b = 1; b < 190; b++) {
for (int a = 0; a < outs; a++) {
//Load Input and Target into the network
setData<<<imgWidth, imgWidth>>>(dataset[b][a].d_red,
network[0]->Maps[0].d_map);
setData<<<imgWidth, imgWidth>>>(dataset[b][a].d_green,
network[0]->Maps[1].d_map);
				setData<<<imgWidth, imgWidth>>>(dataset[b][a].d_blue, //fix: the third input map gets the blue plane, not green twice
network[0]->Maps[2].d_map);
setTarget<<<1, outs>>>(network[length - 1]->Maps[0].d_target,
a);
cudaDeviceSynchronize();
//FeedForward
for (int i = 0; i < length - 1; i++) {
network[i]->feedForward(network[i + 1]);
}
//Backpropagate the error
for (int i = length - 1; i > 0; i--) {
network[i]->backpropagation(network[i - 1]);
}
//Update weights based on bp results
for (int i = 1; i < length; i++) {
network[i]->update(network[i - 1]);
}
//ShowMaps
}
cout << l << endl;
}
}
for (int b = 190; b < 200; b++) {
for (int a = 0; a < outs; a++) {
setData<<<imgWidth, imgWidth>>>(dataset[b][a].d_red,
network[0]->Maps[0].d_map);
setData<<<imgWidth, imgWidth>>>(dataset[b][a].d_green,
network[0]->Maps[1].d_map);
			setData<<<imgWidth, imgWidth>>>(dataset[b][a].d_blue, //fix: the third input map gets the blue plane, not green twice
network[0]->Maps[2].d_map);
setTarget<<<1, outs>>>(network[length - 1]->Maps[0].d_target, a);
for (int i = 0; i < length - 1; i++) {
network[i]->feedForward(network[i + 1]);
}
for (int i = 0; i < length; i++) {
network[i]->showLayer();
}
}
}
namedWindow("Drawing Window", 1);
imshow("Drawing Window", img);
setMouseCallback("Drawing Window", CallBackFunc, NULL);
waitKey(0);
cout << richtig << endl;
cout << falsch << endl;
}
|
de3c331e7a6cd4de2324354d6a74f1f63d5e2c56.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "host_defines.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "struct_rotation.h"
#include "book.h"
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/partition.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <thrust/unique.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/iterator/reverse_iterator.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <algorithm>
#include <vector>
#include <time.h>
#include <string.h>
#include <limits.h>
#include <float.h>
#define numberOfLines1 10000
#define block_width 64
std::vector<struct Line> originalConstraints; //qualify explicitly: this file has no using namespace std
struct is_positive
{
__host__ __device__
bool operator()(const double x)
{
return (x > 0);
}
};
struct is_negative
{
__host__ __device__
bool operator()(const double x)
{
return (x < 0);
}
};
//
//struct is_zero
//{
// __host__ __device__
// bool operator()(const int x)
// {
// return (x == 0);
// }
//};
int seprationG(
thrust::device_vector<double> &t_marker,
thrust::device_vector<int> &t_active,
int numberofelements
)
{
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first + numberofelements;
t_active.resize(numberofelements);
t_active.erase(
thrust::copy_if(
first,
last,
t_marker.begin(),
t_active.begin(),
is_positive()),
t_active.end());
//printf("%lf", t_active[0]);
//printf("%lf", t_active[1]);
//printf("%lf", t_active[2]);
//printf("%lf", t_active[3]);
//printf("%lf", t_active[4]);
//printf("%lf", t_active[5]);
return t_active.size();
}
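/*
 * seprationG is a stream compaction: copy_if walks the counting iterator
 * 0..n-1 with t_marker as the stencil and keeps the indices whose rotated
 * slope is positive (the "G" half of the constraint set). A minimal
 * standalone sketch of the same idiom:
 *
 *   thrust::device_vector<double> marker = ...;
 *   thrust::device_vector<int> idx(marker.size());
 *   auto end = thrust::copy_if(thrust::counting_iterator<int>(0),
 *                              thrust::counting_iterator<int>((int)marker.size()),
 *                              marker.begin(), idx.begin(), is_positive());
 *   idx.erase(end, idx.end());
 */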
double rotationAngle(struct Objfunc *object)
{
double rotationAngle;
if (object->c2 == 0 && object->c1 > 0) {
rotationAngle = -PI / 2;
}
else if (object->c2 == 0 && object->c1 < 0) {
rotationAngle = PI / 2;
}
else {
rotationAngle = atan(-object->c1 / object->c2);
}
return rotationAngle;
}
//,double *arrayG,double arrayH[]
__global__ void rotation(
struct Line constraints[],
double *lines1,
double *lines2,
double *lines3,
double rotationAngle,
int numberOfLines)
{
	double a1Temp, a2Temp, bTemp;
//thrust::device_vector<double> lines1;
int x = threadIdx.x + blockIdx.x * blockDim.x;
// thrust::device_vector<double> lines1;
if (x < (numberOfLines)) {
a1Temp = constraints[x].a1;
a2Temp = constraints[x].a2;
bTemp = constraints[x].b;
lines1[x] = (cos(rotationAngle) * a1Temp) + (sin(rotationAngle) * a2Temp);
lines2[x] = (cos(rotationAngle) * a2Temp) - (sin(rotationAngle) * a1Temp);
lines3[x] = bTemp;
//if (x == 0) {
// printf("%lf\n ", a1Temp);
// printf("%lf\n ", a2Temp);
// printf("%lf\n ", bTemp);
// printf("lines2:");
// printf("%lf\n ", lines1[x]);
// printf("%lf\n ", lines2[x]);
// printf("%lf\n ", lines3[x]);
//}
}
}
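/*
 * Geometry of the kernel above: each constraint a1*x + a2*y <= b is rotated
 * by the angle theta that makes the objective c1*x + c2*y vertical,
 *
 *   a1' =  a1*cos(theta) + a2*sin(theta)
 *   a2' = -a1*sin(theta) + a2*cos(theta),   b' = b,
 *
 * so the 2D linear program reduces to optimising y over the rotated
 * half-planes.
 */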
//int separation
//(
// thrust::device_vector<double> &d_lines1,
// thrust::device_vector<double> &d_lines2,
// thrust::device_vector<double> &d_lines3
//)
//{
// int numberOfLines = 0;
// double leftBound, rightBound;
// double aTemp, bTemp, cTemp;
// struct Objfunc object;
// double rotationAngleTemp;
//
// FILE* fp;
//
// thrust::host_vector<double> lines1(numberOfLines);
// thrust::device_vector<int> arrayG1(numberOfLines);
//
// fp = fopen("Coefficient.txt", "r");
//
// while (1) {
// fscanf_s(fp, "%lf%lf%lf", &aTemp, &bTemp, &cTemp);
// if (aTemp == 0.0 && bTemp == 0.0 && cTemp == 0.0) {
// break;
// }
// struct Line lineTemp;
// lineTemp.a1 = aTemp;
// lineTemp.a2 = bTemp;
// lineTemp.b = cTemp;
// originalConstraints.push_back(lineTemp);
// lines1.push_back(lineTemp.a2);
//
// numberOfLines++;
//
// }
//
// scanf("%lf%lf", &object.c1, &object.c2);
//
//
// rotationAngleTemp = atan(-object.c1 / object.c2);
//
// struct Line *d_constrainstans;
//
// struct Objfunc *d_object;
//
// int *d_numberOfLines;
// double *d_rotationAngle;
// double *d_arrayG;
// double *d_arrayH;
// int size = numberOfLines * sizeof(struct Line);
//
//
// HANDLE_ERROR(hipMalloc((void**)&d_constrainstans, size));
// HANDLE_ERROR(hipMalloc((void**)&d_constrainstans, size));
//
// HANDLE_ERROR(hipMalloc((void**)&d_arrayG, size));
// HANDLE_ERROR(hipMalloc((void**)&d_arrayH, size));
//
// HANDLE_ERROR(hipMemcpy(d_constrainstans, originalConstraints.data(), size, hipMemcpyHostToDevice));
//
// int numberOfBlocks = ceil((float)numberOfLines / block_width);
// rotation << <numberOfBlocks, block_width >> >
// (d_constrainstans,
// thrust::raw_pointer_cast(&d_lines1[0]),
// thrust::raw_pointer_cast(&d_lines2[0]),
// thrust::raw_pointer_cast(&d_lines3[0]),
// rotationAngleTemp,
// numberOfLines);
//
// printf("%lf", d_lines2[0]);
// return 0;
//}
int main()
{
/*int numberOfLines = 0;
double leftBound, rightBound;
double aTemp, bTemp, cTemp;
struct Objfunc object;
double rotationAngleTemp;
FILE* fp;
fp = fopen("Coefficient.txt", "r");
while (1) {
fscanf_s(fp, "%lf%lf%lf", &aTemp, &bTemp, &cTemp);
if (aTemp == 0.0 && bTemp == 0.0 && cTemp == 0.0) {
break;
}
struct Line lineTemp;
lineTemp.a1 = aTemp;
lineTemp.a2 = bTemp;
lineTemp.b = cTemp;
originalConstraints.push_back(lineTemp);
numberOfLines++;
}
thrust::device_vector<double> d_lines1(numberOfLines);
thrust::device_vector<double> d_lines2(numberOfLines);
thrust::device_vector<double> d_lines3(numberOfLines);
separation(d_lines1,d_lines2,d_lines3);*/
int numberOfLines = 0;
double leftBound, rightBound;
double aTemp, bTemp, cTemp;
struct Objfunc object;
double rotationAngleTemp;
//double *arrayG=(double*)malloc(numberOfLines * sizeof(double));
//double *arrayH=(double*)malloc(numberOfLines * sizeof(double));
FILE* fp;
//thrust::device_vector<double> d_lines1(numberOfLines);
thrust::host_vector<double> lines1(numberOfLines);
thrust::device_vector<int> arrayG1(numberOfLines);
fp = fopen("Coefficient.txt", "r");
while (1) {
fscanf_s(fp, "%lf%lf%lf", &aTemp, &bTemp, &cTemp);
if (aTemp == 0.0 && bTemp == 0.0 && cTemp == 0.0) {
break;
}
struct Line lineTemp;
lineTemp.a1 = aTemp;
lineTemp.a2 = bTemp;
lineTemp.b = cTemp;
originalConstraints.push_back(lineTemp);
lines1.push_back(lineTemp.a2);
// printf("%lf\n ",lines1[2]);
numberOfLines++;
}
//for (int i = 0; i < numberOfLines; i++)
//{
// printf("%lf\n ", lines1[i]);
//}
//printf("%lf%lf%lf\n", originalConstraints[0].a1, originalConstraints[0].a2, originalConstraints[0].b);
scanf( "%lf%lf", &object.c1, &object.c2);
//scanf( "%lf%lf", &leftBound, &rightBound);
	rotationAngleTemp = rotationAngle(&object); //use the helper so the c2 == 0 corner cases are handled
//printf("%lf", rotationAngleTemp);
struct Line *d_constrainstans;
//double *d_lines1;
//double *d_lines2;
//double *d_lines3;
struct Objfunc *d_object;
int *d_numberOfLines;
double *d_rotationAngle;
double *d_arrayG;
double *d_arrayH;
int size = numberOfLines * sizeof(struct Line);
// thrust::device_vector<int> d_lines(numberOfLines);
HANDLE_ERROR(hipMalloc((void**)&d_constrainstans, size));
//HANDLE_ERROR(hipMalloc((void**)&d_lines1, size));
//HANDLE_ERROR(hipMalloc((void**)&d_lines2, size));
//HANDLE_ERROR(hipMalloc((void**)&d_lines3, size));
HANDLE_ERROR(hipMalloc((void**)&d_arrayG, size));
HANDLE_ERROR(hipMalloc((void**)&d_arrayH, size));
HANDLE_ERROR(hipMemcpy(d_constrainstans, originalConstraints.data(), size, hipMemcpyHostToDevice));
int numberOfBlocks=ceil((float)numberOfLines/block_width);
thrust::device_vector<double> lines2;
thrust::device_vector<double> d_lines1(numberOfLines);
thrust::device_vector<double> d_lines2(numberOfLines);
thrust::device_vector<double> d_lines3(numberOfLines);
/*thrust::device_vector<double> d_lines2(numberOfLines);
thrust::device_vector<double> d_lines3(numberOfLines);*/
//
//
//<< <numberOfBlocks, block_width >> >
rotation << <numberOfBlocks, block_width >> >
(d_constrainstans,
thrust::raw_pointer_cast(&d_lines1[0]),
thrust::raw_pointer_cast(&d_lines2[0]),
thrust::raw_pointer_cast(&d_lines3[0]),
rotationAngleTemp,
numberOfLines);
int host_array[4];
//hipMemcpy(host_array, thrust::raw_pointer_cast(&d_lines2[0]), 4*sizeof(double), hipMemcpyDeviceToHost);
//printf("%lf ", host_array[0]);
//printf("%lf ", host_array[1]);
//printf("%lf ", host_array[2]);
//printf("%lf ", host_array[3]);
int i = seprationG(d_lines2, arrayG1, numberOfLines);
//printf("%d", i);
	int copyCount = (i < 4) ? i : 4; //guard: fewer than 4 indices may have been kept
	hipMemcpy(host_array, thrust::raw_pointer_cast(&arrayG1[0]), copyCount * sizeof(int), hipMemcpyDeviceToHost);
	for (int k = 0; k < copyCount; k++) {
		printf("%d ", host_array[k]);
	}
//
//printf("%lf ", d_lines2[0]);
//printf("%lf ", d_lines2[1]);
//printf("%lf ", d_lines2[2]);
//printf("%lf ", d_lines2[3]);
//thrust::host_vector<double> output(numberOfLines);
//HANDLE_ERROR(hipMemcpy(d_lines2.data(),arrayH, size, hipMemcpyDeviceToHost));
//printf("t_marker:%lf ", arrayH[0]);
//thrust::copy(arrayG1.begin(), arrayG1.end(), output.begin());
//
// printf("%lf ", output[0]);
//
//HANDLE_ERROR()
//HANDLE_ERROR(hipMemcpy(arrayG, d_arrayG, size, hipMemcpyDeviceToHost));
//HANDLE_ERROR(hipMemcpy(arrayH, d_arrayH, size, hipMemcpyDeviceToHost));
//printf("%d\n", arrayG[0]);
//printf("%d\n", arrayH[0]);
//
// printf("%lf %lf %lf", originalConstraints[1].a1, originalConstraints[1].a2, originalConstraints[1].b);
//
//seprationG(lines2, arrayG1, numberOfLines);
//thrust::copy(arrayG1.begin(), arrayG1.end(), output.begin());
////
//printf("%lf ", output[0]);
//hipFree(d_constrainstans);
//hipFree(d_lines);
//hipFree(d_numberOfLines);
//hipFree(d_rotationAngle);
}
| de3c331e7a6cd4de2324354d6a74f1f63d5e2c56.cu | #include "cuda.h"
#include "host_defines.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "struct_rotation.h"
#include "book.h"
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/partition.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <thrust/unique.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/iterator/reverse_iterator.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <algorithm>
#include <vector>
#include <time.h>
#include <string.h>
#include <limits.h>
#include <float.h>
#define numberOfLines1 10000
#define block_width 64
std::vector<struct Line> originalConstraints; //qualify explicitly: this file has no using namespace std
struct is_positive
{
__host__ __device__
bool operator()(const double x)
{
return (x > 0);
}
};
struct is_negative
{
__host__ __device__
bool operator()(const double x)
{
return (x < 0);
}
};
//
//struct is_zero
//{
// __host__ __device__
// bool operator()(const int x)
// {
// return (x == 0);
// }
//};
int seprationG(
thrust::device_vector<double> &t_marker,
thrust::device_vector<int> &t_active,
int numberofelements
)
{
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first + numberofelements;
t_active.resize(numberofelements);
t_active.erase(
thrust::copy_if(
first,
last,
t_marker.begin(),
t_active.begin(),
is_positive()),
t_active.end());
//printf("%lf", t_active[0]);
//printf("%lf", t_active[1]);
//printf("%lf", t_active[2]);
//printf("%lf", t_active[3]);
//printf("%lf", t_active[4]);
//printf("%lf", t_active[5]);
return t_active.size();
}
double rotationAngle(struct Objfunc *object)
{
double rotationAngle;
if (object->c2 == 0 && object->c1 > 0) {
rotationAngle = -PI / 2;
}
else if (object->c2 == 0 && object->c1 < 0) {
rotationAngle = PI / 2;
}
else {
rotationAngle = atan(-object->c1 / object->c2);
}
return rotationAngle;
}
//,double *arrayG,double arrayH[]
__global__ void rotation(
struct Line constraints[],
double *lines1,
double *lines2,
double *lines3,
double rotationAngle,
int numberOfLines)
{
	double a1Temp, a2Temp, bTemp;
	int x = threadIdx.x + blockIdx.x * blockDim.x;   // one thread per constraint row
if (x < (numberOfLines)) {
a1Temp = constraints[x].a1;
a2Temp = constraints[x].a2;
bTemp = constraints[x].b;
lines1[x] = (cos(rotationAngle) * a1Temp) + (sin(rotationAngle) * a2Temp);
lines2[x] = (cos(rotationAngle) * a2Temp) - (sin(rotationAngle) * a1Temp);
lines3[x] = bTemp;
}
}
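// Each thread applies the 2D rotation matrix to one constraint row:
//   [a1'; a2'] = [cos t, sin t; -sin t, cos t] [a1; a2],
// with the right-hand side b carried through unchanged.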
int main()
{
	int numberOfLines = 0;
	double aTemp, bTemp, cTemp;
	struct Objfunc object;
	double rotationAngleTemp;
	FILE* fp;
	thrust::host_vector<double> lines1;     // host copy of the a2 column
	thrust::device_vector<int> arrayG1;     // filled by seprationG with the indices of positive entries
	fp = fopen("Coefficient.txt", "r");
	if (fp == NULL) {
		printf("cannot open Coefficient.txt\n");
		return 1;
	}
while (1) {
fscanf_s(fp, "%lf%lf%lf", &aTemp, &bTemp, &cTemp);
if (aTemp == 0.0 && bTemp == 0.0 && cTemp == 0.0) {
break;
}
struct Line lineTemp;
lineTemp.a1 = aTemp;
lineTemp.a2 = bTemp;
lineTemp.b = cTemp;
originalConstraints.push_back(lineTemp);
lines1.push_back(lineTemp.a2);
// printf("%lf\n ",lines1[2]);
numberOfLines++;
}
	scanf("%lf%lf", &object.c1, &object.c2);
	// Use the helper so the vertical-objective cases (c2 == 0) are handled.
	rotationAngleTemp = rotationAngle(&object);
	struct Line *d_constrainstans;
	double *d_arrayG;
	double *d_arrayH;
	int size = numberOfLines * sizeof(struct Line);
	HANDLE_ERROR(cudaMalloc((void**)&d_constrainstans, size));
	HANDLE_ERROR(cudaMalloc((void**)&d_arrayG, size));
	HANDLE_ERROR(cudaMalloc((void**)&d_arrayH, size));
	HANDLE_ERROR(cudaMemcpy(d_constrainstans, originalConstraints.data(), size, cudaMemcpyHostToDevice));
	int numberOfBlocks = ceil((float)numberOfLines / block_width);
	thrust::device_vector<double> d_lines1(numberOfLines);   // rotated a1 column
	thrust::device_vector<double> d_lines2(numberOfLines);   // rotated a2 column
	thrust::device_vector<double> d_lines3(numberOfLines);   // right-hand sides
rotation << <numberOfBlocks, block_width >> >
(d_constrainstans,
thrust::raw_pointer_cast(&d_lines1[0]),
thrust::raw_pointer_cast(&d_lines2[0]),
thrust::raw_pointer_cast(&d_lines3[0]),
rotationAngleTemp,
numberOfLines);
int host_array[4];
	// Compact the indices of the positive entries of d_lines2 into arrayG1.
	int i = seprationG(d_lines2, arrayG1, numberOfLines);
	printf("number of positive entries: %d\n", i);
	// Spot-check the first four indices (assumes at least four positive entries).
	cudaMemcpy(host_array, thrust::raw_pointer_cast(&arrayG1[0]), 4 * sizeof(int), cudaMemcpyDeviceToHost);
	printf("%d %d %d %d\n", host_array[0], host_array[1], host_array[2], host_array[3]);
	// Release the raw device allocations (the thrust vectors free themselves).
	cudaFree(d_constrainstans);
	cudaFree(d_arrayG);
	cudaFree(d_arrayH);
	return 0;
}
|
ed7b7ff5d1d234966dd9ec956d1c0a310c775374.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef _TIMER_
#include "hip/hip_runtime_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
#define GAPX (22)
#define GAPY (22)
#define EXTENT (5)
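/* GAPX/GAPY are the widths of the strips left uncovered between the
   interior tiles; EXTENT is the halo each strip kernel loads on either
   side: the four pipelined stencil stages each widen the dependency
   region by one, plus one for the stencil radius (4 + 1 = 5). */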
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
  hipLaunchKernelGGL(__kernel_init__, init_grid, init_block, 0, 0, d_input, value);
}
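/* Usage sketch (illustration, not from the original source): zero-fill
   a device buffer of n floats.

     float* d_buf = NULL;
     hipMalloc((void**)&d_buf, n * sizeof(float));
     initialize_array(d_buf, n, 0.0f);   // T is deduced as float
*/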
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
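  // t[k] and b[k] pipeline the 3D stencil along z: while input plane z is
  // resident, stage k produces three 2D partial sums -- one completing the
  // result for plane z-1 (added into the next stage's tile), one for plane z
  // (held in b) and one for plane z+1 (held in t). The rotation at the bottom
  // of the z-loop shifts them forward, which is why the store index lags the
  // load index by four planes (FORMA_MAX(__iter_2__-4, 0)).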
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[0][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.5f * (__temp_33__ + __temp_42__) / 159;
t[0] += __temp_43__;
//printf ("computed top at (%d,%d,%d) = %.6f\n", __iter_2__, __iter_4__, __iter_5__, __temp_43__);
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
    // Stash this tile's stage-1 border cells for the strip kernels that follow.
if ((__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+1),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2))) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){
float __temp_3__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[1][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[1] += __temp_43__;
//printf ("Top at %d,%d,%d = %.6f\n", __iter_2__-1, __iter_4__, __iter_5__, __temp_43__);
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
//printf ("Mid at %d,%d,%d = %.6f\n", __iter_2__-1, __iter_4__, __iter_5__, __temp_88__);
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+2),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2))) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))){
// Bottom
float __temp_3__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[2][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+3),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2))) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2))){
// Bottom
float __temp_3__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[3][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
//printf ("computed (%d,%d,%d) = %.6f\n", __iter_2__-4, __iter_4__, __iter_5__, out);
}
__syncthreads ();
// Now rotate
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
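/* Kernels 0-3 partition the XY plane: kernel 0 computes the
   FORMA_BLOCKDIM_X x FORMA_BLOCKDIM_Y interior tiles, kernels 1 and 2 the
   GAPX- and GAPY-wide strips between them, and kernel 3 the corner patches
   where the strips cross. The __copy_arr_* buffers carry each stage's
   tile-border values between kernels so a strip kernel can reload the halo
   its stencil needs. */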
/* X+GAP, Y, Z */
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)FORMA_BLOCKDIM_X;
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
tilevar[1][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1))){
tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[0][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[0] += __temp_43__;
//printf ("Top at %d,%d,%d = %.6f\n", __iter_2__, __iter_4__, __iter_5__, t[0]);
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
//printf ("Mid at %d,%d,%d = %.6f\n", __iter_2__, __iter_4__, __iter_5__, __temp_88__);
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+1),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2))) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
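      // Halo refill: threads outside the strip's own compute range pull the
      // stage-1 border values that the interior-tile kernel deposited in
      // __copy_arr_0__.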
if ((__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) & (__iter_5__ < FORMA_MAX((__iter_0__-1),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)))) {
tilevar[1][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
//printf ("Writing values at index (%d,%d) = %.6f\n", __iter_4__-__iter_1__, __iter_5__+(EXTENT-__iter_0__), __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]);
;
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_3__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[1][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[1] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) & ( __iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+2),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2))) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2))) & (__iter_5__ < FORMA_MAX((__iter_0__-2),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)))) {
tilevar[2][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[2][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+3),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2))) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-5),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2))) & (__iter_5__ < FORMA_MAX((__iter_0__-3),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)))) {
tilevar[3][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_3__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[3][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
}
__syncthreads ();
    // Now rotate; the x index carries the same EXTENT offset as every
    // other tilevar access in this kernel so the strip's halo threads
    // stay inside the tile.
    tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
    b[0] = t[0];
    t[0] = 0.0f;
    tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
    b[1] = t[1];
    t[1] = 0.0f;
    tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
    b[2] = t[2];
    t[2] = 0.0f;
    out = b[3];
    b[3] = t[3];
    t[3] = 0.0f;
}
}
/* X, Y+GAP, Z */
__global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[0][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[0] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) & (__iter_5__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2))) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) & (__iter_4__ < FORMA_MAX((__iter_1__-1),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)))) {
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[1][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[1] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) & (__iter_5__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2))) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) & (__iter_4__ < FORMA_MAX((__iter_1__-2),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)))) {
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[2][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) & (__iter_5__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2))) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-5),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) & (__iter_4__ < FORMA_MAX((__iter_1__-3),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)))) {
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[3][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
}
__syncthreads ();
// Now rotate
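// b[i] holds the plane about to be committed and t[i] the partially built
// plane above it: commit each stage's bottom accumulator into the next
// stage's shared tile, promote top to bottom, and clear top for the next
// z-plane of the stream.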
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
/* X+GAP, Y+GAP, Z */
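/* Corner variant: __iter_0__ and __iter_1__ are both offset by a full block,
   so these thread blocks land in the x/y gaps between kernel 0's tiles and
   stitch the four tile corners together using the __copy_arr_* halos. */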
__global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
}
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
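/* Each loaded plane feeds the stencil three times: bottom-neighbor terms are
   gathered into t, middle-plane terms into b, and top-neighbor terms are
   committed straight into the next stage's shared tile. */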
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[0][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[0] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) & (__iter_4__ < FORMA_MAX((__iter_1__-1),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) | __iter_5__ < FORMA_MAX((__iter_0__-1),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)))) {
tilevar[1][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))){
// Bottom
float __temp_3__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[1][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[1] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__-2),1)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) | __iter_5__ < FORMA_MAX((__iter_0__-2),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)))) {
tilevar[2][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))){
// Bottom
float __temp_3__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[2][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-5),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-5),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__-3),1)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) | __iter_5__ < FORMA_MAX((__iter_0__-3),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)))) {
tilevar[3][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2))){
// Bottom
float __temp_3__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[3][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
}
__syncthreads ();
// Now rotate
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d19pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
hipMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
hipPointerAttribute_t ptrAttrib_h_input;
hipMemcpyKind memcpy_kind_h_input = hipMemcpyHostToDevice;
if (hipPointerGetAttributes(&ptrAttrib_h_input, h_input) == hipSuccess)
if (ptrAttrib_h_input.memoryType == hipMemoryTypeDevice)
memcpy_kind_h_input = hipMemcpyDeviceToDevice;
hipGetLastError();
if( memcpy_kind_h_input != hipMemcpyDeviceToDevice ){
hipMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
float * __var_1__;
hipMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
hipMalloc(&__copy_arr_0__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
hipMalloc(&__copy_arr_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
hipMalloc(&__copy_arr_2__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
hipDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,hipDeviceAttributeMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
hipEvent_t _forma_timer_start_,_forma_timer_stop_;
hipEventCreate(&_forma_timer_start_);
hipEventCreate(&_forma_timer_stop_);
hipEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
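// All four kernels share one grid; blocks stride by (blockdim + gap) per axis,
// and the three shifted launches below cover the gap columns, gap rows, and
// gap corners left open by the first launch.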
hipLaunchKernelGGL(( __kernel___forma_kernel__0__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__1__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__2__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
hipLaunchKernelGGL(( __kernel___forma_kernel__3__), dim3(__gridConfig___kernel___forma_kernel__0__), dim3(unrollConfig), 0, 0, input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
hipPointerAttribute_t ptrAttrib___var_0__;
hipMemcpyKind memcpy_kind___var_0__ = hipMemcpyDeviceToHost;
if (hipPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == hipSuccess)
if (ptrAttrib___var_0__.memoryType == hipMemoryTypeDevice)
memcpy_kind___var_0__ = hipMemcpyDeviceToDevice;
hipGetLastError();
hipMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
hipEventRecord(_forma_timer_stop_,0);
hipEventSynchronize(_forma_timer_stop_);
float elapsedTime;
hipEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
hipEventDestroy(_forma_timer_start_);
hipEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
hipFree(input);
hipFree(__var_1__);
hipFree(__copy_arr_0__);
hipFree(__copy_arr_1__);
hipFree(__copy_arr_2__);
}
/*Host Free End*/
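/* A minimal usage sketch for the generated entry point. This driver is an
   illustrative assumption, not part of the generated file: the sizes and the
   random initialization are made up, and error checking is omitted.

   #include <stdlib.h>
   int main() {
     int L = 64, M = 256, N = 256;                       // assumed problem size
     float *in  = (float*) malloc(sizeof(float)*L*M*N);  // host input volume
     float *out = (float*) malloc(sizeof(float)*L*M*N);  // host output volume
     for (int i = 0; i < L*M*N; ++i) in[i] = (float) rand() / RAND_MAX;
     j3d19pt(in, L, M, N, out);  // runs the 4 kernels, copies the result back
     free(in); free(out);
     return 0;
   }
*/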
| ed7b7ff5d1d234966dd9ec956d1c0a310c775374.cu | #include "cuda.h"
#ifdef _TIMER_
#include "cuda_profiler_api.h"
#endif
#include "stdio.h"
#define FORMA_MAX(a,b) ( (a) > (b) ? (a) : (b) )
#define max(a,b) FORMA_MAX(a,b)
#define FORMA_MIN(a,b) ( (a) < (b) ? (a) : (b) )
#define min(a,b) FORMA_MIN(a,b)
#define FORMA_CEIL(a,b) ( (a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1 )
#ifndef FORMA_MAX_BLOCKDIM_0
#define FORMA_MAX_BLOCKDIM_0 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_1
#define FORMA_MAX_BLOCKDIM_1 1024
#endif
#ifndef FORMA_MAX_BLOCKDIM_2
#define FORMA_MAX_BLOCKDIM_2 1024
#endif
#define GAPX (22)
#define GAPY (22)
#define EXTENT (5)
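/* GAPX/GAPY are the widths of the strips deliberately left between the tiles
   of kernel 0; EXTENT is (apparently) the halo width exchanged between tiles
   through the __copy_arr_* buffers. */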
template<typename T>
__global__ void __kernel_init__(T* input, T value)
{
int loc = (int)(blockIdx.x)*(int)(blockDim.x)+(int)(threadIdx.x);
input[loc] = value;
}
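/* Convenience wrapper: launches __kernel_init__ with one thread per element
   to fill a device array with `value`. Note that __kernel_init__ has no
   bounds check, so `size` is effectively assumed to be a multiple of the
   block size. */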
template<typename T>
void initialize_array(T* d_input, int size, T value)
{
dim3 init_grid(FORMA_CEIL(size,FORMA_MAX_BLOCKDIM_0));
dim3 init_block(FORMA_MAX_BLOCKDIM_0);
__kernel_init__<<<init_grid,init_block>>>(d_input,value);
}
void Check_CUDA_Error(const char* message);
/*Texture references */
/*Shared Memory Variable */
extern __shared__ char __FORMA_SHARED_MEM__[];
/* Device code Begin */
/* X, Y, Z */
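/* Base variant: blocks start at multiples of (blockdim + gap) in x and y,
   compute the interior tiles, and export their boundary rings to the
   __copy_arr_* buffers for the shifted kernels. */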
__global__ void __kernel___forma_kernel__0__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1))){
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
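/* Three-way split of the 19-point stencil per streamed z-plane: t gathers
   the terms where this plane acts as the bottom neighbor, b gathers the
   middle-plane terms, and the top-neighbor terms go into tilevar[1]. */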
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[0][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[0] += __temp_43__;
//printf ("computed top at (%d,%d,%d) = %.6f\n", __iter_2__, __iter_4__, __iter_5__, __temp_43__);
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
// Export the boundary ring of the stage-1 result to the global copy array so the shifted kernels can read it
if ((__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+1),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2))) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))){
float __temp_3__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[1][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[1] += __temp_43__;
//printf ("Top at %d,%d,%d = %.6f\n", __iter_2__-1, __iter_4__, __iter_5__, __temp_43__);
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
//printf ("Mid at %d,%d,%d = %.6f\n", __iter_2__-1, __iter_4__, __iter_5__, __temp_88__);
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+2),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2))) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))){
// Bottom
float __temp_3__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[2][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+3),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2) | __iter_5__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2))) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2))){
// Bottom
float __temp_3__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[3][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
//printf ("computed (%d,%d,%d) = %.6f\n", __iter_2__-4, __iter_4__, __iter_5__, out);
}
__syncthreads ();
// Now rotate
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
/* X+GAP, Y, Z */
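/* X-shifted variant: __iter_0__ starts one full block over, so these blocks
   cover the vertical gap strips between kernel 0's tiles, pulling halo cells
   from the __copy_arr_* buffers where a strip borders its neighbors. */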
__global__ void __kernel___forma_kernel__1__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)FORMA_BLOCKDIM_X;
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY);
// Initialize the values
int __iter_4__ = FORMA_MAX(__iter_1__,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
tilevar[1][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1))){
tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[0][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[0] += __temp_43__;
//printf ("Top at %d,%d,%d = %.6f\n", __iter_2__, __iter_4__, __iter_5__, t[0]);
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
//printf ("Mid at %d,%d,%d = %.6f\n", __iter_2__, __iter_4__, __iter_5__, __temp_88__);
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+1),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))-2))) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-2),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) & (__iter_5__ < FORMA_MAX((__iter_0__-1),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)))) {
tilevar[1][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
//printf ("Writing values at index (%d,%d) = %.6f\n", __iter_4__-__iter_1__, __iter_5__+(EXTENT-__iter_0__), __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))]);
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)) ){
float __temp_3__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[1][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[1] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) & ( __iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+2),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))-2))) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-3),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2))) & (__iter_5__ < FORMA_MAX((__iter_0__-2),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)))) {
tilevar[2][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[2][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__+3),1)+2) | __iter_4__ > (FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))-2))) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__+3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-4),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-5),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2))) & (__iter_5__ < FORMA_MAX((__iter_0__-3),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)))) {
tilevar[3][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__+4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+FORMA_BLOCKDIM_Y)-5),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2)) ){
float __temp_3__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_12__ = (tilevar[3][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_102__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
}
__syncthreads ();
// Now rotate
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
/* X, Y+GAP, Z */
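/* Note (inferred from the generated code): the __kernel___forma_kernel__<k>__
   variants are copies of the same 19-point stencil pipeline that differ only
   in which tile of the plane they own (block interiors vs. the GAPX/GAPY
   strips between blocks and their intersections).  Each kernel streams over z
   (__iter_2__), holding a 4-stage pipeline in tilevar[0..3] plus per-thread
   accumulators t[]/b[], so the value for plane z is only complete four
   iterations later -- hence the FORMA_MAX(__iter_2__-4,0) index on the final
   store to __var_1__. */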
__global__ void __kernel___forma_kernel__2__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
}
// Rest of the computation
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-1),(N-1)) ){
tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[0][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[0] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[0][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[0][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) & (__iter_5__ < (FORMA_MAX((__iter_0__+1),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))-2))) {
__copy_arr_0__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-2),(N-2))) & (__iter_4__ < FORMA_MAX((__iter_1__-1),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)))) {
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[1][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[1] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[1][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[1][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) & (__iter_5__ < (FORMA_MAX((__iter_0__+2),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))-2))) {
__copy_arr_1__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-3),(N-2))) & (__iter_4__ < FORMA_MAX((__iter_1__-2),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)))) {
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[2][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[2][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[2][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) & (__iter_5__ < (FORMA_MAX((__iter_0__+3),1)+2) | __iter_5__ > (FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))-2))) {
__copy_arr_2__[__iter_5__+N*(__iter_4__+M*__iter_2__)] = tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)];
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-5),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__+3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-4),(N-2))) & (__iter_4__ < FORMA_MAX((__iter_1__-3),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)))) {
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__+4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+FORMA_BLOCKDIM_X)-5),(N-2)) ){
// Bottom
float __temp_3__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[3][ __iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[3][__iter_5__-1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[3][__iter_5__+1-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
}
__syncthreads ();
// Now rotate
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
/* X+GAP, Y+GAP, Z */
__global__ void __kernel___forma_kernel__3__(float * __restrict__ input, int L, int M, int N, float * __restrict__ __copy_arr_0__, float * __restrict__ __copy_arr_1__, float * __restrict__ __copy_arr_2__, int FORMA_BLOCKDIM_X, int FORMA_BLOCKDIM_Y, int FORMA_BLOCKDIM_Z, float * __restrict__ __var_1__){
__shared__ float tilevar[4][32*32];
float t[4], b[4], out=0.0f;
int __iter_0__ = (int)(blockIdx.x)*((int)(FORMA_BLOCKDIM_X)+GAPX) + (int)(FORMA_BLOCKDIM_X);
int __iter_1__ = (int)(blockIdx.y)*((int)(FORMA_BLOCKDIM_Y)+GAPY) + (int)(FORMA_BLOCKDIM_Y);
int __iter_4__ = FORMA_MAX(__iter_1__-EXTENT,0) + (int)(threadIdx.y) ;
int __iter_5__ = FORMA_MAX(__iter_0__-EXTENT,0) + (int)(threadIdx.x) ;
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = 0.0f;
}
for (int __iter_2__ = 0; __iter_2__ <= L-1; __iter_2__++) {
if(__iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-1)) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-1)) ){
tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] = input[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-1),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-1),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2))){
// Bottom
float __temp_3__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[0][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[0] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[0][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[0] += __temp_88__;
// Top
float __temp_92__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[0][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[0][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))) & (__iter_4__ < FORMA_MAX((__iter_1__-1),1) | __iter_4__ > FORMA_MIN(((__iter_1__+GAPY+1)-1),(M-2)) | __iter_5__ < FORMA_MAX((__iter_0__-1),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+1)-1),(N-2)))) {
tilevar[1][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_0__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-2),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-2),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2))){
// Bottom
float __temp_3__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[1][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[1] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[1][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[1] += __temp_88__;
// Top
float __temp_92__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[1][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[1][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__-2),1)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+2)-1),(M-2))) | __iter_5__ < FORMA_MAX((__iter_0__-2),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+2)-1),(N-2)))) {
tilevar[2][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_1__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-3),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-3),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2))){
// Bottom
float __temp_3__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[2][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[2] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[2][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[2] += __temp_88__;
// Top
float __temp_92__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[2][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[2][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)] += __temp_133__;
}
if ((__iter_4__ >= FORMA_MAX((__iter_1__-5),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+5)-1),(M-2))) & (__iter_5__ >= FORMA_MAX((__iter_0__-5),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+5)-1),(N-2))) & (__iter_4__ < (FORMA_MAX((__iter_1__-3),1)) | __iter_4__ > (FORMA_MIN(((__iter_1__+GAPY+3)-1),(M-2))) | __iter_5__ < FORMA_MAX((__iter_0__-3),1) | __iter_5__ > FORMA_MIN(((__iter_0__+GAPX+3)-1),(N-2)))) {
tilevar[3][__iter_5__+(EXTENT-__iter_0__)+FORMA_BLOCKDIM_X*(__iter_4__+(EXTENT-__iter_1__))] = __copy_arr_2__[__iter_5__+N*(__iter_4__+M*(__iter_2__))];
}
__syncthreads ();
if(__iter_4__ >= FORMA_MAX((__iter_1__-4),1) & __iter_4__ <= FORMA_MIN(((__iter_1__+GAPY+4)-1),(M-2)) & __iter_5__ >= FORMA_MAX((__iter_0__-4),1) & __iter_5__ <= FORMA_MIN(((__iter_0__+GAPX+4)-1),(N-2))){
// Bottom
float __temp_3__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_12__ = (tilevar[3][ __iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_13__ = (__temp_3__ + __temp_12__);
float __temp_32__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_33__ = (__temp_13__ + __temp_32__);
float __temp_42__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_43__ = 0.50f * (__temp_33__ + __temp_42__) / 159;
t[3] += __temp_43__;
//Mid
float __temp_47__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_52__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_53__ = (0.51f * __temp_47__ + 0.71f * __temp_52__);
float __temp_57__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_58__ = (__temp_53__ + 0.91f * __temp_57__);
float __temp_62__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_63__ = (__temp_58__ + 1.21f * __temp_62__);
float __temp_67__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_68__ = (__temp_63__ + 1.51f * __temp_67__);
float __temp_72__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+EXTENT-__iter_1__)]);
float __temp_73__ = (__temp_68__ + 1.21f * __temp_72__);
float __temp_77__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_78__ = (__temp_73__ + 0.91f * __temp_77__);
float __temp_82__ = (tilevar[3][__iter_5__+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_83__ = (__temp_78__ + 0.71f * __temp_82__);
float __temp_87__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_88__ = (__temp_83__ + 0.51f * __temp_87__) / 159;
b[3] += __temp_88__;
// Top
float __temp_92__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_102__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-1+EXTENT-__iter_1__)]);
float __temp_103__ = (__temp_92__ + __temp_102__);
float __temp_122__ = (tilevar[3][__iter_5__-1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_123__ = (__temp_103__ + __temp_122__);
float __temp_132__ = (tilevar[3][__iter_5__+1+EXTENT-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__+1+EXTENT-__iter_1__)]);
float __temp_133__ = 0.52f * (__temp_123__ + __temp_132__) / 159;
out += __temp_133__;
__var_1__[__iter_5__+N*(__iter_4__+M*FORMA_MAX(__iter_2__-4,0))] = out;
}
__syncthreads ();
// Now rotate
tilevar[1][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[0];
b[0] = t[0];
t[0] = 0.0f;
tilevar[2][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[1];
b[1] = t[1];
t[1] = 0.0f;
tilevar[3][__iter_5__-__iter_0__+FORMA_BLOCKDIM_X*(__iter_4__-__iter_1__)] = b[2];
b[2] = t[2];
t[2] = 0.0f;
out= b[3];
b[3] = t[3];
t[3] = 0.0f;
}
}
/*Device code End */
/* Host Code Begin */
extern "C" void j3d19pt(float * h_input, int L, int M, int N, float * __var_0__){
/* Host allocation Begin */
float * input;
cudaMalloc(&input,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : input\n");
cudaPointerAttributes ptrAttrib_h_input;
cudaMemcpyKind memcpy_kind_h_input = cudaMemcpyHostToDevice;
if (cudaPointerGetAttributes(&ptrAttrib_h_input, h_input) == cudaSuccess)
if (ptrAttrib_h_input.memoryType == cudaMemoryTypeDevice)
memcpy_kind_h_input = cudaMemcpyDeviceToDevice;
cudaGetLastError();
if( memcpy_kind_h_input != cudaMemcpyDeviceToDevice ){
cudaMemcpy(input,h_input,sizeof(float)*(L*M*N), memcpy_kind_h_input);
}
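/* cudaPointerGetAttributes tells the wrapper whether h_input is a host or a
   device pointer; the staging copy into `input` is only issued for host
   pointers and is skipped when the data is already device-resident. */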
float * __var_1__;
cudaMalloc(&__var_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __var_1__\n");
float * __copy_arr_0__;
cudaMalloc(&__copy_arr_0__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_0__\n");
float * __copy_arr_1__;
cudaMalloc(&__copy_arr_1__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_1__\n");
float * __copy_arr_2__;
cudaMalloc(&__copy_arr_2__,sizeof(float)*(L*M*N));
Check_CUDA_Error("Allocation Error!! : __copy_arr_2__\n");
/*Host Allocation End */
/* Kernel Launch Begin */
int __FORMA_MAX_SHARED_MEM__;
cudaDeviceGetAttribute(&__FORMA_MAX_SHARED_MEM__,cudaDevAttrMaxSharedMemoryPerBlock,0);
#ifdef _TIMER_
cudaEvent_t _forma_timer_start_,_forma_timer_stop_;
cudaEventCreate(&_forma_timer_start_);
cudaEventCreate(&_forma_timer_stop_);
cudaEventRecord(_forma_timer_start_,0);
#endif
int __size_0___kernel___forma_kernel__0__ = N;
int __size_1___kernel___forma_kernel__0__ = M;
int __block_0___kernel___forma_kernel__0__ = 32;
int __block_1___kernel___forma_kernel__0__ = 32;
int __block_2___kernel___forma_kernel__0__ = 1;
dim3 __blockConfig___kernel___forma_kernel__0__(__block_0___kernel___forma_kernel__0__,__block_1___kernel___forma_kernel__0__,__block_2___kernel___forma_kernel__0__);
int __grid_0___kernel___forma_kernel__0__ = FORMA_CEIL(__size_0___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.x+GAPX);
int __grid_1___kernel___forma_kernel__0__ = FORMA_CEIL(__size_1___kernel___forma_kernel__0__,__blockConfig___kernel___forma_kernel__0__.y+GAPY);
int __grid_2___kernel___forma_kernel__0__ = 1;
dim3 __gridConfig___kernel___forma_kernel__0__(__grid_0___kernel___forma_kernel__0__,__grid_1___kernel___forma_kernel__0__,__grid_2___kernel___forma_kernel__0__);
dim3 unrollConfig (__blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z);
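/* All four stencil kernels are launched with the same configuration: blocks
   are spaced FORMA_BLOCKDIM+GAP apart in x and y, so each kernel covers a
   complementary tile of the plane (per the region comments above each kernel),
   and together they produce the full output in __var_1__. */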
__kernel___forma_kernel__0__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__0__\n");
__kernel___forma_kernel__1__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__1__\n");
__kernel___forma_kernel__2__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__2__\n");
__kernel___forma_kernel__3__<<<__gridConfig___kernel___forma_kernel__0__, unrollConfig>>> (input, L, M, N, __copy_arr_0__, __copy_arr_1__, __copy_arr_2__, __blockConfig___kernel___forma_kernel__0__.x, __blockConfig___kernel___forma_kernel__0__.y, __blockConfig___kernel___forma_kernel__0__.z, __var_1__);
Check_CUDA_Error("Kernel Launch Error!! : __kernel___forma_kernel__3__\n");
cudaPointerAttributes ptrAttrib___var_0__;
cudaMemcpyKind memcpy_kind___var_0__ = cudaMemcpyDeviceToHost;
if (cudaPointerGetAttributes(&ptrAttrib___var_0__, __var_0__) == cudaSuccess)
if (ptrAttrib___var_0__.memoryType == cudaMemoryTypeDevice)
memcpy_kind___var_0__ = cudaMemcpyDeviceToDevice;
cudaGetLastError();
cudaMemcpy(__var_0__,__var_1__, sizeof(float)*(L*M*N), memcpy_kind___var_0__);
#ifdef _TIMER_
cudaEventRecord(_forma_timer_stop_,0);
cudaEventSynchronize(_forma_timer_stop_);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,_forma_timer_start_,_forma_timer_stop_);
printf("[FORMA] Computation Time(ms) : %lf\n",elapsedTime);
cudaEventDestroy(_forma_timer_start_);
cudaEventDestroy(_forma_timer_stop_);
#endif
/*Kernel Launch End */
/* Host Free Begin */
cudaFree(input);
cudaFree(__var_1__);
cudaFree(__copy_arr_0__);
cudaFree(__copy_arr_1__);
cudaFree(__copy_arr_2__);
}
/*Host Free End*/
|
291edbf7443e584b409702999a9bc946566cb650.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <hip/hip_runtime.h>
#include <gauge_field.h>
#include <quda_matrix.h>
#include <hisq_links_quda.h>
namespace quda{
namespace {
#include <svd_quda.h>
}
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
#ifndef FL_UNITARIZE_PI23
#define FL_UNITARIZE_PI23 FL_UNITARIZE_PI*2.0/3.0
#endif
__constant__ int INPUT_PADDING=0;
__constant__ int OUTPUT_PADDING=0;
__constant__ int DEV_MAX_ITER = 20;
static int HOST_MAX_ITER = 20;
__constant__ double DEV_FL_MAX_ERROR;
__constant__ double DEV_FL_UNITARIZE_EPS;
__constant__ bool DEV_FL_REUNIT_ALLOW_SVD;
__constant__ bool DEV_FL_REUNIT_SVD_ONLY;
__constant__ double DEV_FL_REUNIT_SVD_REL_ERROR;
__constant__ double DEV_FL_REUNIT_SVD_ABS_ERROR;
__constant__ bool DEV_FL_CHECK_UNITARIZATION;
static double HOST_FL_MAX_ERROR;
static double HOST_FL_UNITARIZE_EPS;
static bool HOST_FL_REUNIT_ALLOW_SVD;
static bool HOST_FL_REUNIT_SVD_ONLY;
static double HOST_FL_REUNIT_SVD_REL_ERROR;
static double HOST_FL_REUNIT_SVD_ABS_ERROR;
static bool HOST_FL_CHECK_UNITARIZATION;
void setUnitarizeLinksPadding(int input_padding_h, int output_padding_h)
{
hipMemcpyToSymbol(INPUT_PADDING, &input_padding_h, sizeof(int));
hipMemcpyToSymbol(OUTPUT_PADDING, &output_padding_h, sizeof(int));
return;
}
template<class Cmplx>
__device__ __host__
bool isUnitary(const Matrix<Cmplx,3>& matrix, double max_error)
{
const Matrix<Cmplx,3> identity = conj(matrix)*matrix;
for(int i=0; i<3; ++i){
if( fabs(identity(i,i).x - 1.0) > max_error || fabs(identity(i,i).y) > max_error) return false;
for(int j=i+1; j<3; ++j){
if( fabs(identity(i,j).x) > max_error || fabs(identity(i,j).y) > max_error
|| fabs(identity(j,i).x) > max_error || fabs(identity(j,i).y) > max_error ){
return false;
}
}
}
return true;
}
template<class Cmplx>
__device__ __host__
bool isUnitarizedLinkConsistent(const Matrix<Cmplx,3>& initial_matrix,
const Matrix<Cmplx,3>& unitary_matrix,
double max_error)
{
Matrix<Cmplx,3> temporary;
temporary = conj(initial_matrix)*unitary_matrix;
temporary = temporary*temporary - conj(initial_matrix)*initial_matrix;
for(int i=0; i<3; ++i){
for(int j=0; j<3; ++j){
if( fabs(temporary(i,j).x) > max_error || fabs(temporary(i,j).y) > max_error){
return false;
}
}
}
return true;
}
void setUnitarizeLinksConstants(double unitarize_eps_h, double max_error_h,
bool allow_svd_h, bool svd_only_h,
double svd_rel_error_h, double svd_abs_error_h,
bool check_unitarization_h)
{
// not_set is only initialised once
static bool not_set=true;
if(not_set){
hipMemcpyToSymbol(DEV_FL_UNITARIZE_EPS, &unitarize_eps_h, sizeof(double));
hipMemcpyToSymbol(DEV_FL_REUNIT_ALLOW_SVD, &allow_svd_h, sizeof(bool));
hipMemcpyToSymbol(DEV_FL_REUNIT_SVD_ONLY, &svd_only_h, sizeof(bool));
hipMemcpyToSymbol(DEV_FL_REUNIT_SVD_REL_ERROR, &svd_rel_error_h, sizeof(double));
hipMemcpyToSymbol(DEV_FL_REUNIT_SVD_ABS_ERROR, &svd_abs_error_h, sizeof(double));
hipMemcpyToSymbol(DEV_FL_MAX_ERROR, &max_error_h, sizeof(double));
hipMemcpyToSymbol(DEV_FL_CHECK_UNITARIZATION, &check_unitarization_h, sizeof(bool));
HOST_FL_UNITARIZE_EPS = unitarize_eps_h;
HOST_FL_REUNIT_ALLOW_SVD = allow_svd_h;
HOST_FL_REUNIT_SVD_ONLY = svd_only_h;
HOST_FL_REUNIT_SVD_REL_ERROR = svd_rel_error_h;
HOST_FL_REUNIT_SVD_ABS_ERROR = svd_abs_error_h;
HOST_FL_MAX_ERROR = max_error_h;
HOST_FL_CHECK_UNITARIZATION = check_unitarization_h;
not_set = false;
}
checkCudaError();
return;
}
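// Illustrative first-time setup (tolerance values are placeholders, not QUDA
// defaults):
//
//   setUnitarizeLinksConstants(1e-6,   // unitarize_eps
//                              1e-10,  // max_error
//                              true,   // allow_svd
//                              false,  // svd_only
//                              1e-6,   // svd_rel_error
//                              1e-6,   // svd_abs_error
//                              true);  // check_unitarization
//
// Because of the static not_set guard, only the first call takes effect.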
template<class T>
__device__ __host__
T getAbsMin(const T* const array, int size){
T min = fabs(array[0]);
for(int i=1; i<size; ++i){
T abs_val = fabs(array[i]);
if((abs_val) < min){ min = abs_val; }
}
return min;
}
template<class Real>
__device__ __host__
inline bool checkAbsoluteError(Real a, Real b, Real epsilon)
{
if( fabs(a-b) < epsilon) return true;
return false;
}
template<class Real>
__device__ __host__
inline bool checkRelativeError(Real a, Real b, Real epsilon)
{
if( fabs((a-b)/b) < epsilon ) return true;
return false;
}
// Compute the reciprocal square root of the matrix q
// Also modify q if the eigenvalues are dangerously small.
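// Method sketch: for a Hermitian positive-definite q, Cayley-Hamilton gives
// q^{-1/2} = c[0]*I + c[1]*q + c[2]*q^2.  The eigenvalues g[0..2] come from
// the characteristic cubic via the trigonometric formula (r, s, theta below),
// and the c[i] are then solved so that the polynomial equals 1/sqrt(g[i]) on
// each eigenvalue.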
template<class Cmplx>
__device__ __host__
bool reciprocalRoot(const Matrix<Cmplx,3>& q, Matrix<Cmplx,3>* res){
Matrix<Cmplx,3> qsq, tempq;
typename RealTypeId<Cmplx>::Type c[3];
typename RealTypeId<Cmplx>::Type g[3];
qsq = q*q;
tempq = qsq*q;
c[0] = getTrace(q).x;
c[1] = getTrace(qsq).x/2.0;
c[2] = getTrace(tempq).x/3.0;
g[0] = g[1] = g[2] = c[0]/3.;
typename RealTypeId<Cmplx>::Type r,s,theta;
s = c[1]/3. - c[0]*c[0]/18;
#ifdef __CUDA_ARCH__
#define FL_UNITARIZE_EPS DEV_FL_UNITARIZE_EPS
#else
#define FL_UNITARIZE_EPS HOST_FL_UNITARIZE_EPS
#endif
#ifdef __CUDA_ARCH__
#define FL_REUNIT_SVD_REL_ERROR DEV_FL_REUNIT_SVD_REL_ERROR
#define FL_REUNIT_SVD_ABS_ERROR DEV_FL_REUNIT_SVD_ABS_ERROR
#else // cpu
#define FL_REUNIT_SVD_REL_ERROR HOST_FL_REUNIT_SVD_REL_ERROR
#define FL_REUNIT_SVD_ABS_ERROR HOST_FL_REUNIT_SVD_ABS_ERROR
#endif
typename RealTypeId<Cmplx>::Type cosTheta;
if(fabs(s) >= FL_UNITARIZE_EPS){
const typename RealTypeId<Cmplx>::Type sqrt_s = sqrt(s);
r = c[2]/2. - (c[0]/3.)*(c[1] - c[0]*c[0]/9.);
cosTheta = r/(sqrt_s*sqrt_s*sqrt_s);
if(fabs(cosTheta) >= 1.0){
if( r > 0 ){
theta = 0.0;
}else{
theta = FL_UNITARIZE_PI;
}
}else{
theta = acos(cosTheta);
}
g[0] = c[0]/3 + 2*sqrt_s*cos( theta/3 );
g[1] = c[0]/3 + 2*sqrt_s*cos( theta/3 + FL_UNITARIZE_PI23 );
g[2] = c[0]/3 + 2*sqrt_s*cos( theta/3 + 2*FL_UNITARIZE_PI23 );
}
// Check the eigenvalues: if the determinant does not match the product of the
// eigenvalues, return false so that the caller falls back to SVD.
typename RealTypeId<Cmplx>::Type det = getDeterminant(q).x;
if( fabs(det) < FL_REUNIT_SVD_ABS_ERROR ){
return false;
}
if( checkRelativeError(g[0]*g[1]*g[2],det,FL_REUNIT_SVD_REL_ERROR) == false ) return false;
// At this point we have finished with the c's
// use these to store sqrt(g)
for(int i=0; i<3; ++i) c[i] = sqrt(g[i]);
// done with the g's, use these to store u, v, w
g[0] = c[0]+c[1]+c[2];
g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2];
g[2] = c[0]*c[1]*c[2];
const typename RealTypeId<Cmplx>::Type & denominator = g[2]*(g[0]*g[1]-g[2]);
c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1]))/denominator;
c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1])/denominator;
c[2] = g[0]/denominator;
tempq = c[1]*q + c[2]*qsq;
// Add a real scalar
tempq(0,0).x += c[0];
tempq(1,1).x += c[0];
tempq(2,2).x += c[0];
*res = tempq;
return true;
}
template<class Cmplx>
__host__ __device__
bool unitarizeLinkMILC(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u;
#ifdef __CUDA_ARCH__
#define FL_REUNIT_SVD_ONLY DEV_FL_REUNIT_SVD_ONLY
#define FL_REUNIT_ALLOW_SVD DEV_FL_REUNIT_ALLOW_SVD
#else
#define FL_REUNIT_SVD_ONLY HOST_FL_REUNIT_SVD_ONLY
#define FL_REUNIT_ALLOW_SVD HOST_FL_REUNIT_ALLOW_SVD
#endif
if( !FL_REUNIT_SVD_ONLY ){
if( reciprocalRoot<Cmplx>(conj(in)*in,&u) ){
*result = in*u;
return true;
}
}
// If we've got this far, then the Cayley-Hamilton unitarization
// has failed; if SVD is not allowed as a fallback, give up.
if( !FL_REUNIT_ALLOW_SVD ) return false;
Matrix<Cmplx,3> v;
typename RealTypeId<Cmplx>::Type singular_values[3];
computeSVD<Cmplx>(in, u, v, singular_values); // computeSVD fills u, v and singular_values
*result = u*conj(v);
return true;
} // unitarizeLinkMILC
template<class Cmplx>
__host__ __device__
bool unitarizeLinkSVD(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u, v;
typename RealTypeId<Cmplx>::Type singular_values[3];
computeSVD<Cmplx>(in, u, v, singular_values); // computeSVD fills u, v and singular_values
*result = u*conj(v);
#ifdef __CUDA_ARCH__
#define FL_MAX_ERROR DEV_FL_MAX_ERROR
#else
#define FL_MAX_ERROR HOST_FL_MAX_ERROR
#endif
if(isUnitary(*result,FL_MAX_ERROR)==false)
{
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("ERROR: Link unitarity test failed\n");
printf("TOLERANCE: %g\n", FL_MAX_ERROR);
#endif
return false;
}
return true;
}
#undef FL_MAX_ERROR
template<class Cmplx>
__host__ __device__
bool unitarizeLinkNewton(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u, uinv;
u = in;
#ifdef __CUDA_ARCH__
#define MAX_ITER DEV_MAX_ITER
#else
#define MAX_ITER HOST_MAX_ITER
#endif
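// Polar-decomposition Newton iteration: u <- 0.5*(u + (u^dagger)^{-1});
// conj() on a Matrix is the Hermitian conjugate here, and the iterate
// converges to the unitary factor of the input link.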
for(int i=0; i<MAX_ITER; ++i){
computeMatrixInverse(u, &uinv);
u = 0.5*(u + conj(uinv));
}
#undef MAX_ITER
if(isUnitarizedLinkConsistent(in,u,0.0000001)==false)
{
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("ERROR: Unitarized link is not consistent with incoming link\n");
#endif
return false;
}
*result = u;
return true;
}
template<class Cmplx>
__global__ void getUnitarizedField(const Cmplx* inlink_even, const Cmplx* inlink_odd,
Cmplx* outlink_even, Cmplx* outlink_odd,
int* num_failures, const int threads)
{
int mem_idx = blockIdx.x*blockDim.x + threadIdx.x;
if (mem_idx >= threads) return;
const Cmplx* inlink;
Cmplx* outlink;
inlink = inlink_even;
outlink = outlink_even;
if(mem_idx >= Vh){
mem_idx = mem_idx - Vh;
inlink = inlink_odd;
outlink = outlink_odd;
}
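// Links are stored in even/odd (checkerboard) order: Vh is the half volume,
// indices below Vh address the even sublattice and the rest the odd one.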
// Unitarization is always done in double precision
Matrix<double2,3> v, result;
for(int dir=0; dir<4; ++dir){
loadLinkVariableFromArray(inlink, dir, mem_idx, Vh+INPUT_PADDING, &v);
unitarizeLinkMILC(v, &result);
#ifdef __CUDA_ARCH__
#define FL_MAX_ERROR DEV_FL_MAX_ERROR
#define FL_CHECK_UNITARIZATION DEV_FL_CHECK_UNITARIZATION
#else
#define FL_MAX_ERROR HOST_FL_MAX_ERROR
#define FL_CHECK_UNITARIZATION HOST_FL_CHECK_UNITARIZATION
#endif
if(FL_CHECK_UNITARIZATION){
if(isUnitary(result,FL_MAX_ERROR) == false)
{
#ifdef __CUDA_ARCH__
atomicAdd(num_failures, 1);
#else
(*num_failures)++;
#endif
}
}
writeLinkVariableToArray(result, dir, mem_idx, Vh+OUTPUT_PADDING, outlink);
}
return;
}
class UnitarizeLinksCuda : public Tunable {
private:
const cudaGaugeField &inField;
cudaGaugeField &outField;
int *fails;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return inField.Volume(); }
public:
UnitarizeLinksCuda(const cudaGaugeField& inField, cudaGaugeField& outField, int* fails) :
inField(inField), outField(outField), fails(fails) { ; }
virtual ~UnitarizeLinksCuda() { ; }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(inField.Precision() == QUDA_SINGLE_PRECISION){
hipLaunchKernelGGL(( getUnitarizedField), dim3(tp.grid),dim3(tp.block), 0, 0, (float2*)inField.Even_p(), (float2*)inField.Odd_p(),
(float2*)outField.Even_p(), (float2*)outField.Odd_p(),
fails, inField.Volume());
}else if(inField.Precision() == QUDA_DOUBLE_PRECISION){
hipLaunchKernelGGL(( getUnitarizedField), dim3(tp.grid),dim3(tp.block), 0, 0, (double2*)inField.Even_p(), (double2*)inField.Odd_p(),
(double2*)outField.Even_p(), (double2*)outField.Odd_p(),
fails, inField.Volume());
} else {
errorQuda("UnitarizeLinks not implemented for precision %d", inField.Precision());
}
}
void preTune() { ; }
void postTune() { hipMemset(fails, 0, sizeof(int)); } // reset fails counter
long long flops() const { return 0; } // FIXME: add flops counter
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << inField.X()[0] << "x";
vol << inField.X()[1] << "x";
vol << inField.X()[2] << "x";
vol << inField.X()[3] << "x";
aux << "threads=" << inField.Volume() << ",prec=" << inField.Precision();
aux << "stride=" << inField.Stride();
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
}; // UnitarizeLinksCuda
void unitarizeLinksCuda(const QudaGaugeParam& param,
cudaGaugeField& inField,
cudaGaugeField* outField,
int* fails) {
UnitarizeLinksCuda unitarizeLinks(inField, *outField, fails);
unitarizeLinks.apply(0);
}
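// Illustrative host-side use (buffer name is hypothetical):
//
//   int* num_failures_dev;
//   hipMalloc(&num_failures_dev, sizeof(int));
//   hipMemset(num_failures_dev, 0, sizeof(int));
//   unitarizeLinksCuda(param, inField, &outField, num_failures_dev);
//   // copy num_failures_dev back to the host and verify it is still 0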
void unitarizeLinksCPU(const QudaGaugeParam& param, cpuGaugeField& infield, cpuGaugeField* outfield)
{
int num_failures = 0;
Matrix<double2,3> inlink, outlink;
for(int i=0; i<infield.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(param.cpu_prec == QUDA_SINGLE_PRECISION){
copyArrayToLink(&inlink, ((float*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++;
copyLinkToArray(((float*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink);
}else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&inlink, ((double*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++;
copyLinkToArray(((double*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink);
} // precision?
} // dir
} // loop over volume
return;
}
// CPU function which checks that the gauge field is unitary
bool isUnitary(const QudaGaugeParam& param, cpuGaugeField& field, double max_error)
{
Matrix<double2,3> link, identity;
for(int i=0; i<field.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(param.cpu_prec == QUDA_SINGLE_PRECISION){
copyArrayToLink(&link, ((float*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&link, ((double*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else{
errorQuda("Unsupported precision\n");
}
if(isUnitary(link,max_error) == false){
printf("Unitarity failure\n");
printf("site index = %d,\t direction = %d\n", i, dir);
printLink(link);
identity = conj(link)*link;
printLink(identity);
return false;
}
} // dir
} // i
return true;
} // is unitary
} // namespace quda
| 291edbf7443e584b409702999a9bc946566cb650.cu | #include <cstdlib>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <gauge_field.h>
#include <quda_matrix.h>
#include <hisq_links_quda.h>
namespace quda{
namespace {
#include <svd_quda.h>
}
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
#ifndef FL_UNITARIZE_PI23
#define FL_UNITARIZE_PI23 FL_UNITARIZE_PI*2.0/3.0
#endif
__constant__ int INPUT_PADDING=0;
__constant__ int OUTPUT_PADDING=0;
__constant__ int DEV_MAX_ITER = 20;
static int HOST_MAX_ITER = 20;
__constant__ double DEV_FL_MAX_ERROR;
__constant__ double DEV_FL_UNITARIZE_EPS;
__constant__ bool DEV_FL_REUNIT_ALLOW_SVD;
__constant__ bool DEV_FL_REUNIT_SVD_ONLY;
__constant__ double DEV_FL_REUNIT_SVD_REL_ERROR;
__constant__ double DEV_FL_REUNIT_SVD_ABS_ERROR;
__constant__ bool DEV_FL_CHECK_UNITARIZATION;
static double HOST_FL_MAX_ERROR;
static double HOST_FL_UNITARIZE_EPS;
static bool HOST_FL_REUNIT_ALLOW_SVD;
static bool HOST_FL_REUNIT_SVD_ONLY;
static double HOST_FL_REUNIT_SVD_REL_ERROR;
static double HOST_FL_REUNIT_SVD_ABS_ERROR;
static bool HOST_FL_CHECK_UNITARIZATION;
void setUnitarizeLinksPadding(int input_padding_h, int output_padding_h)
{
cudaMemcpyToSymbol(INPUT_PADDING, &input_padding_h, sizeof(int));
cudaMemcpyToSymbol(OUTPUT_PADDING, &output_padding_h, sizeof(int));
return;
}
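// INPUT_PADDING / OUTPUT_PADDING extend the Vh stride used to index the link
// arrays in getUnitarizedField below; they live in __constant__ memory so the
// kernel can read them without extra arguments.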
template<class Cmplx>
__device__ __host__
bool isUnitary(const Matrix<Cmplx,3>& matrix, double max_error)
{
const Matrix<Cmplx,3> identity = conj(matrix)*matrix;
for(int i=0; i<3; ++i){
if( fabs(identity(i,i).x - 1.0) > max_error || fabs(identity(i,i).y) > max_error) return false;
for(int j=i+1; j<3; ++j){
if( fabs(identity(i,j).x) > max_error || fabs(identity(i,j).y) > max_error
|| fabs(identity(j,i).x) > max_error || fabs(identity(j,i).y) > max_error ){
return false;
}
}
}
return true;
}
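// Consistency test used after Newton unitarization: if W = V (V^dagger V)^{-1/2}
// is the unitary projection of V, then (V^dagger W)^2 = V^dagger V; the routine
// below checks that identity elementwise against max_error.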
template<class Cmplx>
__device__ __host__
bool isUnitarizedLinkConsistent(const Matrix<Cmplx,3>& initial_matrix,
const Matrix<Cmplx,3>& unitary_matrix,
double max_error)
{
Matrix<Cmplx,3> temporary;
temporary = conj(initial_matrix)*unitary_matrix;
temporary = temporary*temporary - conj(initial_matrix)*initial_matrix;
for(int i=0; i<3; ++i){
for(int j=0; j<3; ++j){
if( fabs(temporary(i,j).x) > max_error || fabs(temporary(i,j).y) > max_error){
return false;
}
}
}
return true;
}
void setUnitarizeLinksConstants(double unitarize_eps_h, double max_error_h,
bool allow_svd_h, bool svd_only_h,
double svd_rel_error_h, double svd_abs_error_h,
bool check_unitarization_h)
{
// not_set is only initialised once
static bool not_set=true;
if(not_set){
cudaMemcpyToSymbol(DEV_FL_UNITARIZE_EPS, &unitarize_eps_h, sizeof(double));
cudaMemcpyToSymbol(DEV_FL_REUNIT_ALLOW_SVD, &allow_svd_h, sizeof(bool));
cudaMemcpyToSymbol(DEV_FL_REUNIT_SVD_ONLY, &svd_only_h, sizeof(bool));
cudaMemcpyToSymbol(DEV_FL_REUNIT_SVD_REL_ERROR, &svd_rel_error_h, sizeof(double));
cudaMemcpyToSymbol(DEV_FL_REUNIT_SVD_ABS_ERROR, &svd_abs_error_h, sizeof(double));
cudaMemcpyToSymbol(DEV_FL_MAX_ERROR, &max_error_h, sizeof(double));
cudaMemcpyToSymbol(DEV_FL_CHECK_UNITARIZATION, &check_unitarization_h, sizeof(bool));
HOST_FL_UNITARIZE_EPS = unitarize_eps_h;
HOST_FL_REUNIT_ALLOW_SVD = allow_svd_h;
HOST_FL_REUNIT_SVD_ONLY = svd_only_h;
HOST_FL_REUNIT_SVD_REL_ERROR = svd_rel_error_h;
HOST_FL_REUNIT_SVD_ABS_ERROR = svd_abs_error_h;
HOST_FL_MAX_ERROR = max_error_h;
HOST_FL_CHECK_UNITARIZATION = check_unitarization_h;
not_set = false;
}
checkCudaError();
return;
}
template<class T>
__device__ __host__
T getAbsMin(const T* const array, int size){
T min = fabs(array[0]);
for(int i=1; i<size; ++i){
T abs_val = fabs(array[i]);
if((abs_val) < min){ min = abs_val; }
}
return min;
}
template<class Real>
__device__ __host__
inline bool checkAbsoluteError(Real a, Real b, Real epsilon)
{
if( fabs(a-b) < epsilon) return true;
return false;
}
template<class Real>
__device__ __host__
inline bool checkRelativeError(Real a, Real b, Real epsilon)
{
if( fabs((a-b)/b) < epsilon ) return true;
return false;
}
// Compute the reciprocal square root of the matrix q
// Also modify q if the eigenvalues are dangerously small.
template<class Cmplx>
__device__ __host__
bool reciprocalRoot(const Matrix<Cmplx,3>& q, Matrix<Cmplx,3>* res){
Matrix<Cmplx,3> qsq, tempq;
typename RealTypeId<Cmplx>::Type c[3];
typename RealTypeId<Cmplx>::Type g[3];
qsq = q*q;
tempq = qsq*q;
c[0] = getTrace(q).x;
c[1] = getTrace(qsq).x/2.0;
c[2] = getTrace(tempq).x/3.0;
g[0] = g[1] = g[2] = c[0]/3.;
typename RealTypeId<Cmplx>::Type r,s,theta;
s = c[1]/3. - c[0]*c[0]/18;
#ifdef __CUDA_ARCH__
#define FL_UNITARIZE_EPS DEV_FL_UNITARIZE_EPS
#else
#define FL_UNITARIZE_EPS HOST_FL_UNITARIZE_EPS
#endif
#ifdef __CUDA_ARCH__
#define FL_REUNIT_SVD_REL_ERROR DEV_FL_REUNIT_SVD_REL_ERROR
#define FL_REUNIT_SVD_ABS_ERROR DEV_FL_REUNIT_SVD_ABS_ERROR
#else // cpu
#define FL_REUNIT_SVD_REL_ERROR HOST_FL_REUNIT_SVD_REL_ERROR
#define FL_REUNIT_SVD_ABS_ERROR HOST_FL_REUNIT_SVD_ABS_ERROR
#endif
typename RealTypeId<Cmplx>::Type cosTheta;
if(fabs(s) >= FL_UNITARIZE_EPS){
const typename RealTypeId<Cmplx>::Type sqrt_s = sqrt(s);
r = c[2]/2. - (c[0]/3.)*(c[1] - c[0]*c[0]/9.);
cosTheta = r/(sqrt_s*sqrt_s*sqrt_s);
if(fabs(cosTheta) >= 1.0){
if( r > 0 ){
theta = 0.0;
}else{
theta = FL_UNITARIZE_PI;
}
}else{
theta = acos(cosTheta);
}
g[0] = c[0]/3 + 2*sqrt_s*cos( theta/3 );
g[1] = c[0]/3 + 2*sqrt_s*cos( theta/3 + FL_UNITARIZE_PI23 );
g[2] = c[0]/3 + 2*sqrt_s*cos( theta/3 + 2*FL_UNITARIZE_PI23 );
}
// Check the eigenvalues: if the determinant is too small or does not match
// the product of the eigenvalues, return false so the caller falls back to SVD.
typename RealTypeId<Cmplx>::Type det = getDeterminant(q).x;
if( fabs(det) < FL_REUNIT_SVD_ABS_ERROR ){
return false;
}
if( checkRelativeError(g[0]*g[1]*g[2],det,FL_REUNIT_SVD_REL_ERROR) == false ) return false;
// At this point we have finished with the c's
// use these to store sqrt(g)
for(int i=0; i<3; ++i) c[i] = sqrt(g[i]);
// done with the g's, use these to store u, v, w
g[0] = c[0]+c[1]+c[2];
g[1] = c[0]*c[1] + c[0]*c[2] + c[1]*c[2];
g[2] = c[0]*c[1]*c[2];
const typename RealTypeId<Cmplx>::Type & denominator = g[2]*(g[0]*g[1]-g[2]);
c[0] = (g[0]*g[1]*g[1] - g[2]*(g[0]*g[0]+g[1]))/denominator;
c[1] = (-g[0]*g[0]*g[0] - g[2] + 2.*g[0]*g[1])/denominator;
c[2] = g[0]/denominator;
tempq = c[1]*q + c[2]*qsq;
// Add a real scalar
tempq(0,0).x += c[0];
tempq(1,1).x += c[0];
tempq(2,2).x += c[0];
*res = tempq;
return true;
}
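// Sketch of the math implemented above (variable names as in this file).
// For a 3x3 Hermitian Q, g[0..2] are the eigenvalues of Q obtained from the
// characteristic polynomial via the trigonometric method (r, s, theta), and
// c[i] = sqrt(g[i]) are then the eigenvalues of Q^{1/2}. With
// u = c0+c1+c2, v = c0*c1+c0*c2+c1*c2 and w = c0*c1*c2 (stored back into
// g[0..2]), Cayley-Hamilton gives
//
//   Q^{-1/2} = f0*I + f1*Q + f2*Q^2,   d  = w*(u*v - w),
//   f0 = (u*v*v - w*(u*u + v))/d,      f1 = (-u*u*u - w + 2*u*v)/d,
//   f2 = u/d,
//
// which is exactly what the final assignments to c[0..2] and tempq evaluate.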
template<class Cmplx>
__host__ __device__
bool unitarizeLinkMILC(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u;
#ifdef __CUDA_ARCH__
#define FL_REUNIT_SVD_ONLY DEV_FL_REUNIT_SVD_ONLY
#define FL_REUNIT_ALLOW_SVD DEV_FL_REUNIT_ALLOW_SVD
#else
#define FL_REUNIT_SVD_ONLY HOST_FL_REUNIT_SVD_ONLY
#define FL_REUNIT_ALLOW_SVD HOST_FL_REUNIT_ALLOW_SVD
#endif
if( !FL_REUNIT_SVD_ONLY ){
if( reciprocalRoot<Cmplx>(conj(in)*in,&u) ){
*result = in*u;
return true;
}
}
// If we've got this far, then the Cayley-Hamilton unitarization
// has failed. If SVD is not allowed either, the unitarization fails.
if( !FL_REUNIT_ALLOW_SVD ) return false;
Matrix<Cmplx,3> v;
typename RealTypeId<Cmplx>::Type singular_values[3];
computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u, v I guess
*result = u*conj(v);
return true;
} // unitarizeMILC
template<class Cmplx>
__host__ __device__
bool unitarizeLinkSVD(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u, v;
typename RealTypeId<Cmplx>::Type singular_values[3];
computeSVD<Cmplx>(in, u, v, singular_values); // should pass pointers to u,v I guess
*result = u*conj(v);
#ifdef __CUDA_ARCH__
#define FL_MAX_ERROR DEV_FL_MAX_ERROR
#else
#define FL_MAX_ERROR HOST_FL_MAX_ERROR
#endif
if(isUnitary(*result,FL_MAX_ERROR)==false)
{
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("ERROR: Link unitarity test failed\n");
printf("TOLERANCE: %g\n", FL_MAX_ERROR);
#endif
return false;
}
return true;
}
#undef FL_MAX_ERROR
template<class Cmplx>
__host__ __device__
bool unitarizeLinkNewton(const Matrix<Cmplx,3>& in, Matrix<Cmplx,3>* const result)
{
Matrix<Cmplx,3> u, uinv;
u = in;
#ifdef __CUDA_ARCH__
#define MAX_ITER DEV_MAX_ITER
#else
#define MAX_ITER HOST_MAX_ITER
#endif
for(int i=0; i<MAX_ITER; ++i){
computeMatrixInverse(u, &uinv);
u = 0.5*(u + conj(uinv));
}
#undef MAX_ITER
if(isUnitarizedLinkConsistent(in,u,0.0000001)==false)
{
#if (!defined(__CUDA_ARCH__) || (__COMPUTE_CAPABILITY__>=200))
printf("ERROR: Unitarized link is not consistent with incoming link\n");
#endif
return false;
}
*result = u;
return true;
}
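// The loop above is the Newton iteration for the unitary polar factor:
// u <- (u + (u^-1)^dagger)/2. If in = W*P is the polar decomposition
// (W unitary, P Hermitian positive-definite) and `in` is nonsingular, the
// iterates converge quadratically to W; a fixed MAX_ITER is used here
// instead of a convergence test.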
template<class Cmplx>
__global__ void getUnitarizedField(const Cmplx* inlink_even, const Cmplx* inlink_odd,
Cmplx* outlink_even, Cmplx* outlink_odd,
int* num_failures, const int threads)
{
int mem_idx = blockIdx.x*blockDim.x + threadIdx.x;
if (mem_idx >= threads) return;
const Cmplx* inlink;
Cmplx* outlink;
inlink = inlink_even;
outlink = outlink_even;
if(mem_idx >= Vh){
mem_idx = mem_idx - Vh;
inlink = inlink_odd;
outlink = outlink_odd;
}
// Unitarization is always done in double precision
Matrix<double2,3> v, result;
for(int dir=0; dir<4; ++dir){
loadLinkVariableFromArray(inlink, dir, mem_idx, Vh+INPUT_PADDING, &v);
unitarizeLinkMILC(v, &result);
#ifdef __CUDA_ARCH__
#define FL_MAX_ERROR DEV_FL_MAX_ERROR
#define FL_CHECK_UNITARIZATION DEV_FL_CHECK_UNITARIZATION
#else
#define FL_MAX_ERROR HOST_FL_MAX_ERROR
#define FL_CHECK_UNITARIZATION HOST_FL_CHECK_UNITARIZATION
#endif
if(FL_CHECK_UNITARIZATION){
if(isUnitary(result,FL_MAX_ERROR) == false)
{
#ifdef __CUDA_ARCH__
atomicAdd(num_failures, 1);
#else
(*num_failures)++;
#endif
}
}
writeLinkVariableToArray(result, dir, mem_idx, Vh+OUTPUT_PADDING, outlink);
}
return;
}
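// Index mapping used by the kernel above: links live in two parity arrays of
// Vh sites each (even/odd checkerboard). A global thread id t in [0, 2*Vh)
// resolves to
//   t <  Vh : even-parity array, site t
//   t >= Vh : odd-parity  array, site t - Vh
// and each thread unitarizes the 4 directional links of its site.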
class UnitarizeLinksCuda : public Tunable {
private:
const cudaGaugeField &inField;
cudaGaugeField &outField;
int *fails;
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &) const { return 0; }
// don't tune the grid dimension
bool tuneGridDim() const { return false; }
unsigned int minThreads() const { return inField.Volume(); }
public:
UnitarizeLinksCuda(const cudaGaugeField& inField, cudaGaugeField& outField, int* fails) :
inField(inField), outField(outField), fails(fails) { ; }
virtual ~UnitarizeLinksCuda() { ; }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if(inField.Precision() == QUDA_SINGLE_PRECISION){
getUnitarizedField<<<tp.grid,tp.block>>>((float2*)inField.Even_p(), (float2*)inField.Odd_p(),
(float2*)outField.Even_p(), (float2*)outField.Odd_p(),
fails, inField.Volume());
}else if(inField.Precision() == QUDA_DOUBLE_PRECISION){
getUnitarizedField<<<tp.grid,tp.block>>>((double2*)inField.Even_p(), (double2*)inField.Odd_p(),
(double2*)outField.Even_p(), (double2*)outField.Odd_p(),
fails, inField.Volume());
} else {
errorQuda("UnitarizeLinks not implemented for precision %d", inField.Precision());
}
}
void preTune() { ; }
void postTune() { cudaMemset(fails, 0, sizeof(int)); } // reset fails counter
long long flops() const { return 0; } // FIXME: add flops counter
TuneKey tuneKey() const {
std::stringstream vol, aux;
vol << inField.X()[0] << "x";
vol << inField.X()[1] << "x";
vol << inField.X()[2] << "x";
vol << inField.X()[3] << "x";
aux << "threads=" << inField.Volume() << ",prec=" << inField.Precision();
aux << "stride=" << inField.Stride();
return TuneKey(vol.str(), typeid(*this).name(), aux.str());
}
}; // UnitarizeLinksCuda
void unitarizeLinksCuda(const QudaGaugeParam& param,
cudaGaugeField& inField,
cudaGaugeField* outField,
int* fails) {
UnitarizeLinksCuda unitarizeLinks(inField, *outField, fails);
unitarizeLinks.apply(0);
}
void unitarizeLinksCPU(const QudaGaugeParam& param, cpuGaugeField& infield, cpuGaugeField* outfield)
{
int num_failures = 0;
Matrix<double2,3> inlink, outlink;
for(int i=0; i<infield.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(param.cpu_prec == QUDA_SINGLE_PRECISION){
copyArrayToLink(&inlink, ((float*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++;
copyLinkToArray(((float*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink);
}else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&inlink, ((double*)(infield.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
if( unitarizeLinkNewton<double2>(inlink, &outlink) == false ) num_failures++;
copyLinkToArray(((double*)(outfield->Gauge_p()) + (i*4 + dir)*18), outlink);
} // precision?
} // dir
} // loop over volume
return;
}
// CPU function which checks that the gauge field is unitary
bool isUnitary(const QudaGaugeParam& param, cpuGaugeField& field, double max_error)
{
Matrix<double2,3> link, identity;
for(int i=0; i<field.Volume(); ++i){
for(int dir=0; dir<4; ++dir){
if(param.cpu_prec == QUDA_SINGLE_PRECISION){
copyArrayToLink(&link, ((float*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else if(param.cpu_prec == QUDA_DOUBLE_PRECISION){
copyArrayToLink(&link, ((double*)(field.Gauge_p()) + (i*4 + dir)*18)); // order of arguments?
}else{
errorQuda("Unsupported precision\n");
}
if(isUnitary(link,max_error) == false){
printf("Unitarity failure\n");
printf("site index = %d,\t direction = %d\n", i, dir);
printLink(link);
identity = conj(link)*link;
printLink(identity);
return false;
}
} // dir
} // i
return true;
} // is unitary
} // namespace quda
|
2745a90c0008181d3a8a5b0290fb38ce50041878.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<math.h>
#include<stdlib.h>
#include<time.h>
#define N 2048
using namespace std;
void random_ints(int *vector, int size){
for(int i=0; i<size; i++)
vector[i] = rand()%10;
}
void copy_int_to_float(float *dest, int *src, int size){
for(int i=0; i<size; i++)
dest[i] = float(src[i]);
}
__global__ void min(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){
int first_index = tid * step_size *2;
int second_index = first_index + step_size;
vector[first_index] = vector[first_index] > vector[second_index] ? vector[second_index] : vector[first_index];
        }
        __syncthreads(); // make this step's writes visible before the next step reads them
        step_size <<= 1;
        number_of_threads >>= 1;
}
}
__global__ void max(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){
int first_index = tid * step_size *2;
int second_index = first_index + step_size;
vector[first_index] = vector[first_index] < vector[second_index] ? vector[second_index] : vector[first_index];
        }
        __syncthreads(); // make this step's writes visible before the next step reads them
        step_size <<= 1;
        number_of_threads >>= 1;
}
}
__global__ void sum(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){ //If thread is alive
int first_index = tid * step_size * 2; //As each thread operates on 2 elements.
int second_index = first_index + step_size;
vector[first_index] += vector[second_index];
        }
        __syncthreads(); // make this step's writes visible before the next step reads them
        step_size <<= 1;
        number_of_threads >>= 1;
}
}
__global__ void sum_floats(float *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){ //If thread is alive
int first_index = tid * step_size * 2; //As each thread operates on 2 elements.
int second_index = first_index + step_size;
vector[first_index] += vector[second_index];
        }
        __syncthreads(); // make this step's writes visible before the next step reads them
        step_size <<= 1;
        number_of_threads >>= 1;
}
}
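// All four kernels above share the same in-place tree reduction. Worked
// example (illustrative only) for an 8-element array reduced by 4 threads:
//   step_size=1: v[0]+=v[1]  v[2]+=v[3]  v[4]+=v[5]  v[6]+=v[7]   (4 threads)
//   step_size=2: v[0]+=v[2]  v[4]+=v[6]                           (2 threads)
//   step_size=4: v[0]+=v[4]                                       (1 thread)
// leaving the result in v[0]; the __syncthreads() between steps guarantees
// that e.g. v[2] is fully written before thread 0 reads it in step 2.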
__global__ void mean_diff_sq(float *vector, float mean){ //Calculates (x - x')^2
vector[threadIdx.x] -= mean;
vector[threadIdx.x] *= vector[threadIdx.x];
}
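// Two-pass standard deviation used in main() below: this kernel maps each
// element to (x_i - mean)^2, sum_floats then reduces the squares, and the
// host computes
//   variance = (1/N) * sum_i (x_i - mean)^2,   stddev = sqrt(variance)
// (population variance, i.e. dividing by N rather than N-1).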
int main(void){
int size = N * sizeof(int);
int *vec; //Host copy of vec
int *d_vec; //Device copy of vec
int result;
srand(time(0));
vec = (int *)malloc(size);
random_ints(vec, N);
hipMalloc((void **)&d_vec, size);
//SUM
hipMemcpy(d_vec, vec, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sum), dim3(1), dim3(N/2), 0, 0, d_vec);
//Copy the first element of array back to result
hipMemcpy(&result, d_vec, sizeof(int), hipMemcpyDeviceToHost);
printf("Sum is: %d", result);
//MIN
hipMemcpy(d_vec, vec, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( min), dim3(1), dim3(N/2), 0, 0, d_vec);
//Copy the first element of array back to result
hipMemcpy(&result, d_vec, sizeof(int), hipMemcpyDeviceToHost);
printf("\\nMin is: %d", result);
//MAX
hipMemcpy(d_vec, vec, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( max), dim3(1), dim3(N/2), 0, 0, d_vec);
//Copy the first element of array back to result
hipMemcpy(&result, d_vec, sizeof(int), hipMemcpyDeviceToHost);
printf("\\nMax is: %d", result);
//MEAN
hipMemcpy(d_vec, vec, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( sum), dim3(1), dim3(N/2), 0, 0, d_vec);
//Copy the first element of array back to result
hipMemcpy(&result, d_vec, sizeof(int), hipMemcpyDeviceToHost);
float mean = float(result)/N;
printf("\\nMean is: %f", mean);
//STD. DEV
float *float_vec;
float *d_float_vec;
float_vec = (float *)malloc(N*sizeof(float));
hipMalloc((void **)&d_float_vec, N*sizeof(float));
copy_int_to_float(float_vec, vec, N);
hipMemcpy(d_float_vec, float_vec, N*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( mean_diff_sq), dim3(1), dim3(N), 0, 0, d_float_vec, mean);
hipLaunchKernelGGL(( sum_floats), dim3(1), dim3(N/2), 0, 0, d_float_vec);
float res;
hipMemcpy(&res, d_float_vec, sizeof(res), hipMemcpyDeviceToHost);
res /= N;
printf("\\nVariance: %f", res);
res = sqrt(res);
printf("\\nStd. Dev: %f", res);
//Free allocated memory
hipFree(d_vec);
hipFree(d_float_vec);
free(vec);
free(float_vec);
printf("\n");
return 0;
} | 2745a90c0008181d3a8a5b0290fb38ce50041878.cu | #include<iostream>
#include<math.h>
#include<stdlib.h>
#include<time.h>
#define N 2048
using namespace std;
void random_ints(int *vector, int size){
for(int i=0; i<size; i++)
vector[i] = rand()%10;
}
void copy_int_to_float(float *dest, int *src, int size){
for(int i=0; i<size; i++)
dest[i] = float(src[i]);
}
__global__ void min(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){
int first_index = tid * step_size *2;
int second_index = first_index + step_size;
vector[first_index] = vector[first_index] > vector[second_index] ? vector[second_index] : vector[first_index];
        }
        __syncthreads(); // make this step's writes visible before the next step reads them
        step_size <<= 1;
        number_of_threads >>= 1;
}
}
__global__ void max(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){
int first_index = tid * step_size *2;
int second_index = first_index + step_size;
vector[first_index] = vector[first_index] < vector[second_index] ? vector[second_index] : vector[first_index];
        }
        __syncthreads(); // make this step's writes visible before the next step reads them
        step_size <<= 1;
        number_of_threads >>= 1;
}
}
__global__ void sum(int *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){ //If thread is alive
int first_index = tid * step_size * 2; //As each thread operates on 2 elements.
int second_index = first_index + step_size;
vector[first_index] += vector[second_index];
        }
        __syncthreads(); // make this step's writes visible before the next step reads them
        step_size <<= 1;
        number_of_threads >>= 1;
}
}
__global__ void sum_floats(float *vector){
int tid = threadIdx.x;
int step_size = 1;
int number_of_threads = blockDim.x;
while(number_of_threads > 0){
if(tid < number_of_threads){ //If thread is alive
int first_index = tid * step_size * 2; //As each thread operates on 2 elements.
int second_index = first_index + step_size;
vector[first_index] += vector[second_index];
        }
        __syncthreads(); // make this step's writes visible before the next step reads them
        step_size <<= 1;
        number_of_threads >>= 1;
}
}
__global__ void mean_diff_sq(float *vector, float mean){ //Calculates (x - x')^2
vector[threadIdx.x] -= mean;
vector[threadIdx.x] *= vector[threadIdx.x];
}
int main(void){
int size = N * sizeof(int);
int *vec; //Host copy of vec
int *d_vec; //Device copy of vec
int result;
srand(time(0));
vec = (int *)malloc(size);
random_ints(vec, N);
cudaMalloc((void **)&d_vec, size);
//SUM
cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice);
sum<<<1, N/2>>>(d_vec);
//Copy the first element of array back to result
cudaMemcpy(&result, d_vec, sizeof(int), cudaMemcpyDeviceToHost);
printf("Sum is: %d", result);
//MIN
cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice);
min<<<1, N/2>>>(d_vec);
//Copy the first element of array back to result
cudaMemcpy(&result, d_vec, sizeof(int), cudaMemcpyDeviceToHost);
printf("\\nMin is: %d", result);
//MAX
cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice);
max<<<1, N/2>>>(d_vec);
//Copy the first element of array back to result
cudaMemcpy(&result, d_vec, sizeof(int), cudaMemcpyDeviceToHost);
printf("\\nMax is: %d", result);
//MEAN
cudaMemcpy(d_vec, vec, size, cudaMemcpyHostToDevice);
sum<<<1, N/2>>>(d_vec);
//Copy the first element of array back to result
cudaMemcpy(&result, d_vec, sizeof(int), cudaMemcpyDeviceToHost);
float mean = float(result)/N;
printf("\\nMean is: %f", mean);
//STD. DEV
float *float_vec;
float *d_float_vec;
float_vec = (float *)malloc(N*sizeof(float));
cudaMalloc((void **)&d_float_vec, N*sizeof(float));
copy_int_to_float(float_vec, vec, N);
cudaMemcpy(d_float_vec, float_vec, N*sizeof(float), cudaMemcpyHostToDevice);
mean_diff_sq<<<1, N>>>(d_float_vec, mean);
sum_floats<<<1, N/2>>>(d_float_vec);
float res;
cudaMemcpy(&res, d_float_vec, sizeof(res), cudaMemcpyDeviceToHost);
res /= N;
printf("\\nVariance: %f", res);
res = sqrt(res);
printf("\\nStd. Dev: %f", res);
//Free allocated memory
cudaFree(d_vec);
cudaFree(d_float_vec);
free(vec);
free(float_vec);
printf("\n");
return 0;
} |
56862a0dc9dcfbc7df20cb4f74bbc0bc9d8db6e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
#define BLOCK_SIZE_SH 18
#define HEADER_SIZE 122
typedef unsigned char BYTE;
/**
* Structure that represents a BMP image.
*/
typedef struct
{
int width;
int height;
float *data;
} BMPImage;
typedef struct timeval tval;
BYTE g_info[HEADER_SIZE]; // Reference header
/**
* Reads a BMP 24bpp file and returns a BMPImage structure.
* Thanks to https://stackoverflow.com/a/9296467
*/
BMPImage readBMP(char *filename)
{
BMPImage bitmap = { 0 };
int size = 0;
BYTE *data = NULL;
FILE *file = fopen(filename, "rb");
// Read the header (expected BGR - 24bpp)
fread(g_info, sizeof(BYTE), HEADER_SIZE, file);
// Get the image width / height from the header
bitmap.width = *((int *)&g_info[18]);
bitmap.height = *((int *)&g_info[22]);
size = *((int *)&g_info[34]);
// Read the image data
data = (BYTE *)malloc(sizeof(BYTE) * size);
fread(data, sizeof(BYTE), size, file);
// Convert the pixel values to float
bitmap.data = (float *)malloc(sizeof(float) * size);
for (int i = 0; i < size; i++)
{
bitmap.data[i] = (float)data[i];
}
fclose(file);
free(data);
return bitmap;
}
/**
* Writes a BMP file in grayscale given its image data and a filename.
*/
void writeBMPGrayscale(int width, int height, float *image, char *filename)
{
FILE *file = NULL;
file = fopen(filename, "wb");
// Write the reference header
fwrite(g_info, sizeof(BYTE), HEADER_SIZE, file);
// Unwrap the 8-bit grayscale into a 24bpp (for simplicity)
for (int h = 0; h < height; h++)
{
int offset = h * width;
for (int w = 0; w < width; w++)
{
BYTE pixel = (BYTE)((image[offset + w] > 255.0f) ? 255.0f :
(image[offset + w] < 0.0f) ? 0.0f :
image[offset + w]);
// Repeat the same pixel value for BGR
fputc(pixel, file);
fputc(pixel, file);
fputc(pixel, file);
}
}
fclose(file);
}
/**
* Releases a given BMPImage.
*/
void freeBMP(BMPImage bitmap)
{
free(bitmap.data);
}
/**
* Checks if there has been any CUDA error. The method will automatically print
* some information and exit the program when an error is found.
*/
void checkCUDAError()
{
hipError_t hipError_t = hipGetLastError();
if (hipError_t != hipSuccess)
{
printf("CUDA Error: Returned %d: %s\n", hipError_t,
hipGetErrorString(hipError_t));
exit(-1);
}
}
/**
* Calculates the elapsed time between two time intervals (in milliseconds).
*/
double get_elapsed(tval t0, tval t1)
{
return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L;
}
/**
* Stores the result image and prints a message.
*/
void store_result(int index, double elapsed_cpu, double elapsed_gpu,
int width, int height, float *image)
{
char path[255];
sprintf(path, "images/hw3_result_%d.bmp", index);
writeBMPGrayscale(width, height, image, path);
printf("Step #%d Completed - Result stored in \"%s\".\n", index, path);
printf("Elapsed CPU: %fms / ", elapsed_cpu);
if (elapsed_gpu == 0)
{
printf("[GPU version not available]\n");
}
else
{
printf("Elapsed GPU: %fms\n", elapsed_gpu);
}
}
/**
* Converts a given 24bpp image into 8bpp grayscale using the CPU.
*/
void cpu_grayscale(int width, int height, float *image, float *image_out)
{
for (int h = 0; h < height; h++)
{
int offset_out = h * width; // 1 color per pixel
int offset = offset_out * 3; // 3 colors per pixel
for (int w = 0; w < width; w++)
{
float *pixel = &image[offset + w * 3];
// Convert to grayscale following the "luminance" model
image_out[offset_out + w] = pixel[0] * 0.0722f + // B
pixel[1] * 0.7152f + // G
pixel[2] * 0.2126f; // R
}
}
}
/**
* Converts a given 24bpp image into 8bpp grayscale using the GPU.
*/
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
//int i = blockDim.x*blockIdx.x + threadIdx.x;
int w = blockDim.x*blockIdx.x + threadIdx.x;
int h = blockDim.y*blockIdx.y + threadIdx.y;
if (w < width && h < height) {
int offset_out = h * width;
int offset = offset_out * 3;
float *pixel = &image[offset + w * 3];
image_out[offset_out + w] = pixel[0] * 0.0722f + pixel[1] * 0.7152f + pixel[2] * 0.2126f;
}
////////////////
// TO-DO #4.2 /////////////////////////////////////////////
// Implement the GPU version of the grayscale conversion //
///////////////////////////////////////////////////////////
}
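// The weights 0.2126 / 0.7152 / 0.0722 are the Rec. 709 (BT.709) luma
// coefficients for R / G / B; they are applied in B,G,R order because BMP
// stores pixel channels as BGR.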
/**
* Applies a 3x3 convolution matrix to a pixel using the CPU.
*/
float cpu_applyFilter(float *image, int stride, float *matrix, int filter_dim)
{
float pixel = 0.0f;
for (int h = 0; h < filter_dim; h++)
{
int offset = h * stride;
int offset_kernel = h * filter_dim;
for (int w = 0; w < filter_dim; w++)
{
pixel += image[offset + w] * matrix[offset_kernel + w];
}
}
return pixel;
}
/**
* Applies a 3x3 convolution matrix to a pixel using the GPU.
*/
__device__ float gpu_applyFilter(float *image, int stride, float *matrix, int filter_dim)
{
float pixel = 0.0f;
for (int h = 0; h < filter_dim; h++)
{
int offset = h * stride;
int offset_kernel = h * filter_dim;
for (int w = 0; w < filter_dim; w++)
{
pixel += image[offset + w] * matrix[offset_kernel + w];
}
}
return pixel;
////////////////
// TO-DO #5.2 ////////////////////////////////////////////////
// Implement the GPU version of cpu_applyFilter() //
// //
// Does it make sense to have a separate gpu_applyFilter()? //
//////////////////////////////////////////////////////////////
}
/**
* Applies a Gaussian 3x3 filter to a given image using the CPU.
*/
void cpu_gaussian(int width, int height, float *image, float *image_out)
{
float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f,
2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f,
1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f };
for (int h = 0; h < (height - 2); h++)
{
int offset_t = h * width;
int offset = (h + 1) * width;
for (int w = 0; w < (width - 2); w++)
{
image_out[offset + (w + 1)] = cpu_applyFilter(&image[offset_t + w],
width, gaussian, 3);
}
}
}
/**
* Applies a Gaussian 3x3 filter to a given image using the GPU.
*/
__global__ void gpu_gaussian(int width, int height, float *image, float *image_out)
{
__shared__ float sh_block[BLOCK_SIZE_SH * BLOCK_SIZE_SH];
float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f,
2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f,
1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f };
//width index
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
//height index
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
//Current pixel being processed
int offset_t = index_y * width + index_x;
int offset = (index_y + 1) * width + (index_x + 1);
int sh_block_offset = threadIdx.y * BLOCK_SIZE_SH + threadIdx.x;
bool inBounds = false;
if (index_x < (width - 2) && index_y < (height - 2))
{
sh_block[sh_block_offset] = image[offset_t];
inBounds = true;
}
__syncthreads();
if (inBounds) {
if (threadIdx.x == blockDim.x - 1) {
//Add right pixels
sh_block[sh_block_offset + 1] = image[offset_t + 1];
sh_block[sh_block_offset + 2] = image[offset_t + 2];
}
if (threadIdx.y == blockDim.y - 1) {
//Add top pixels
sh_block[sh_block_offset + BLOCK_SIZE_SH] = image[offset_t + width];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH] = image[offset_t + 2 * width];
}
if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1) {
sh_block[sh_block_offset + BLOCK_SIZE_SH + 1] = image[offset_t + width + 1];
sh_block[sh_block_offset + BLOCK_SIZE_SH + 2] = image[offset_t + width + 2];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH + 1] = image[offset_t + 2 * width + 1];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH + 2] = image[offset_t + 2 * width + 2];
}
}
__syncthreads();
if (inBounds) {
image_out[offset] = gpu_applyFilter(&sh_block[sh_block_offset],
BLOCK_SIZE_SH, gaussian, 3);
//Old no shared memory
//image_out[offset] = gpu_applyFilter(&image[offset_t],
// width, gaussian, 3);
}
}
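// Shared-memory tile used above: each BLOCK_SIZE x BLOCK_SIZE thread block
// stages a (BLOCK_SIZE+2) x (BLOCK_SIZE+2) tile so the 3x3 stencil can read a
// one-pixel halo on the right and bottom without extra global-memory trips.
// Sanity check (an addition; assumes the 3x3 stencil needs a halo of exactly 2):
static_assert(BLOCK_SIZE_SH == BLOCK_SIZE + 2, "shared tile must be the block plus a 2-pixel halo");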
/**
* Calculates the gradient of an image using a Sobel filter on the CPU.
*/
void cpu_sobel(int width, int height, float *image, float *image_out)
{
float sobel_x[9] = { 1.0f, 0.0f, -1.0f,
2.0f, 0.0f, -2.0f,
1.0f, 0.0f, -1.0f };
float sobel_y[9] = { 1.0f, 2.0f, 1.0f,
0.0f, 0.0f, 0.0f,
-1.0f, -2.0f, -1.0f };
for (int h = 0; h < (height - 2); h++)
{
int offset_t = h * width;
int offset = (h + 1) * width;
for (int w = 0; w < (width - 2); w++)
{
float gx = cpu_applyFilter(&image[offset_t + w], width, sobel_x, 3);
float gy = cpu_applyFilter(&image[offset_t + w], width, sobel_y, 3);
// Note: The output can be negative or exceed the max. color value
// of 255. We compensate this afterwards while storing the file.
image_out[offset + (w + 1)] = sqrtf(gx * gx + gy * gy);
}
}
}
/**
* Calculates the gradient of an image using a Sobel filter on the GPU.
*/
__global__ void gpu_sobel(int width, int height, float *image, float *image_out)
{
float sobel_x[9] = { 1.0f, 0.0f, -1.0f,
2.0f, 0.0f, -2.0f,
1.0f, 0.0f, -1.0f };
float sobel_y[9] = { 1.0f, 2.0f, 1.0f,
0.0f, 0.0f, 0.0f,
-1.0f, -2.0f, -1.0f };
__shared__ float sh_block[BLOCK_SIZE_SH * BLOCK_SIZE_SH];
int w = blockIdx.x * blockDim.x + threadIdx.x;
int h = blockIdx.y * blockDim.y + threadIdx.y;
int offset_t = h * width + w;
int offset = (h + 1) * width + w + 1;
int sh_block_offset = threadIdx.y * BLOCK_SIZE_SH + threadIdx.x;
bool inBounds = false;
if (w < (width - 2) && h < (height - 2)) {
sh_block[sh_block_offset] = image[offset_t];
inBounds = true;
}
__syncthreads();
if (inBounds) {
if (threadIdx.x == blockDim.x - 1) {
//Add right pixels
sh_block[sh_block_offset + 1] = image[offset_t + 1];
sh_block[sh_block_offset + 2] = image[offset_t + 2];
}
if (threadIdx.y == blockDim.y - 1) {
//Add top pixels
sh_block[sh_block_offset + BLOCK_SIZE_SH] = image[offset_t + width];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH] = image[offset_t + 2 * width];
}
if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1) {
sh_block[sh_block_offset + BLOCK_SIZE_SH + 1] = image[offset_t + width + 1];
sh_block[sh_block_offset + BLOCK_SIZE_SH + 2] = image[offset_t + width + 2];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH + 1] = image[offset_t + 2 * width + 1];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH + 2] = image[offset_t + 2 * width + 2];
}
}
__syncthreads();
if (inBounds) {
float gx = gpu_applyFilter(&sh_block[sh_block_offset],
BLOCK_SIZE_SH, sobel_x, 3);
float gy = gpu_applyFilter(&sh_block[sh_block_offset],
BLOCK_SIZE_SH, sobel_y, 3);
image_out[offset] = sqrtf(gx * gx + gy * gy);
}
//Old
/*
float gx = gpu_applyFilter(&image[offset_t + w], width, sobel_x, 3);
float gy = gpu_applyFilter(&image[offset_t + w], width, sobel_y, 3);
image_out[offset + (w + 1)] = sqrtf(gx * gx + gy * gy);
*/
}
int main(int argc, char **argv)
{
BMPImage bitmap = { 0 };
float *d_bitmap = { 0 };
float *image_out[2] = { 0 };
float *d_image_out[2] = { 0 };
int image_size = 0;
tval t[2] = { 0 };
double elapsed[2] = { 0 };
dim3 grid(1); // The grid will be defined later
dim3 block(BLOCK_SIZE, BLOCK_SIZE); // The block size will not change
// Make sure the filename is provided
if (argc != 2)
{
fprintf(stderr, "Error: The filename is missing!\n");
return -1;
}
// Read the input image and update the grid dimension
bitmap = readBMP(argv[1]);
image_size = bitmap.width * bitmap.height;
grid = dim3(((bitmap.width + (BLOCK_SIZE - 1)) / BLOCK_SIZE),
((bitmap.height + (BLOCK_SIZE - 1)) / BLOCK_SIZE));
printf("Image opened (width=%d height=%d).\n", bitmap.width, bitmap.height);
// Allocate the intermediate image buffers for each step
for (int i = 0; i < 2; i++)
{
image_out[i] = (float *)calloc(image_size, sizeof(float));
hipMalloc(&d_image_out[i], image_size * sizeof(float));
hipMemset(d_image_out[i], 0, image_size * sizeof(float));
}
hipMalloc(&d_bitmap, image_size * sizeof(float) * 3);
hipMemcpy(d_bitmap, bitmap.data,
image_size * sizeof(float) * 3, hipMemcpyHostToDevice);
// Step 1: Convert to grayscale
{
// Launch the CPU version
gettimeofday(&t[0], NULL);
//cpu_grayscale(bitmap.width, bitmap.height, bitmap.data, image_out[0]);
gettimeofday(&t[1], NULL);
elapsed[0] = get_elapsed(t[0], t[1]);
// Launch the GPU version
gettimeofday(&t[0], NULL);
gpu_grayscale << <grid, block >> > (bitmap.width, bitmap.height,
d_bitmap, d_image_out[0]);
hipMemcpy(image_out[0], d_image_out[0],
image_size * sizeof(float), hipMemcpyDeviceToHost);
gettimeofday(&t[1], NULL);
elapsed[1] = get_elapsed(t[0], t[1]);
// Store the result image in grayscale
store_result(1, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[0]);
}
// Step 2: Apply a 3x3 Gaussian filter
{
// Launch the CPU version
gettimeofday(&t[0], NULL);
//cpu_gaussian(bitmap.width, bitmap.height, image_out[0], image_out[1]);
gettimeofday(&t[1], NULL);
elapsed[0] = get_elapsed(t[0], t[1]);
// Launch the GPU version
gettimeofday(&t[0], NULL);
gpu_gaussian << <grid, block >> > (bitmap.width, bitmap.height,
d_image_out[0], d_image_out[1]);
hipMemcpy(image_out[1], d_image_out[1],
image_size * sizeof(float), hipMemcpyDeviceToHost);
gettimeofday(&t[1], NULL);
elapsed[1] = get_elapsed(t[0], t[1]);
// Store the result image with the Gaussian filter applied
store_result(2, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[1]);
}
// Step 3: Apply a Sobel filter
{
// Launch the CPU version
gettimeofday(&t[0], NULL);
//cpu_sobel(bitmap.width, bitmap.height, image_out[1], image_out[0]);
gettimeofday(&t[1], NULL);
elapsed[0] = get_elapsed(t[0], t[1]);
// Launch the GPU version
gettimeofday(&t[0], NULL);
gpu_sobel << <grid, block >> > (bitmap.width, bitmap.height,
d_image_out[1], d_image_out[0]);
hipMemcpy(image_out[0], d_image_out[0],
image_size * sizeof(float), hipMemcpyDeviceToHost);
gettimeofday(&t[1], NULL);
elapsed[1] = get_elapsed(t[0], t[1]);
// Store the final result image with the Sobel filter applied
store_result(3, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[0]);
}
// Release the allocated memory
for (int i = 0; i < 2; i++)
{
free(image_out[i]);
hipFree(d_image_out[i]);
}
freeBMP(bitmap);
hipFree(d_bitmap);
return 0;
}
| 56862a0dc9dcfbc7df20cb4f74bbc0bc9d8db6e1.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define BLOCK_SIZE 16
#define BLOCK_SIZE_SH 18
#define HEADER_SIZE 122
typedef unsigned char BYTE;
/**
* Structure that represents a BMP image.
*/
typedef struct
{
int width;
int height;
float *data;
} BMPImage;
typedef struct timeval tval;
BYTE g_info[HEADER_SIZE]; // Reference header
/**
* Reads a BMP 24bpp file and returns a BMPImage structure.
* Thanks to https://stackoverflow.com/a/9296467
*/
BMPImage readBMP(char *filename)
{
BMPImage bitmap = { 0 };
int size = 0;
BYTE *data = NULL;
FILE *file = fopen(filename, "rb");
// Read the header (expected BGR - 24bpp)
fread(g_info, sizeof(BYTE), HEADER_SIZE, file);
// Get the image width / height from the header
bitmap.width = *((int *)&g_info[18]);
bitmap.height = *((int *)&g_info[22]);
size = *((int *)&g_info[34]);
// Read the image data
data = (BYTE *)malloc(sizeof(BYTE) * size);
fread(data, sizeof(BYTE), size, file);
// Convert the pixel values to float
bitmap.data = (float *)malloc(sizeof(float) * size);
for (int i = 0; i < size; i++)
{
bitmap.data[i] = (float)data[i];
}
fclose(file);
free(data);
return bitmap;
}
/**
* Writes a BMP file in grayscale given its image data and a filename.
*/
void writeBMPGrayscale(int width, int height, float *image, char *filename)
{
FILE *file = NULL;
file = fopen(filename, "wb");
// Write the reference header
fwrite(g_info, sizeof(BYTE), HEADER_SIZE, file);
// Unwrap the 8-bit grayscale into a 24bpp (for simplicity)
for (int h = 0; h < height; h++)
{
int offset = h * width;
for (int w = 0; w < width; w++)
{
BYTE pixel = (BYTE)((image[offset + w] > 255.0f) ? 255.0f :
(image[offset + w] < 0.0f) ? 0.0f :
image[offset + w]);
// Repeat the same pixel value for BGR
fputc(pixel, file);
fputc(pixel, file);
fputc(pixel, file);
}
}
fclose(file);
}
/**
* Releases a given BMPImage.
*/
void freeBMP(BMPImage bitmap)
{
free(bitmap.data);
}
/**
* Checks if there has been any CUDA error. The method will automatically print
* some information and exit the program when an error is found.
*/
void checkCUDAError()
{
cudaError_t cudaError = cudaGetLastError();
if (cudaError != cudaSuccess)
{
printf("CUDA Error: Returned %d: %s\n", cudaError,
cudaGetErrorString(cudaError));
exit(-1);
}
}
/**
* Calculates the elapsed time between two time intervals (in milliseconds).
*/
double get_elapsed(tval t0, tval t1)
{
return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L;
}
/**
* Stores the result image and prints a message.
*/
void store_result(int index, double elapsed_cpu, double elapsed_gpu,
int width, int height, float *image)
{
char path[255];
sprintf(path, "images/hw3_result_%d.bmp", index);
writeBMPGrayscale(width, height, image, path);
printf("Step #%d Completed - Result stored in \"%s\".\n", index, path);
printf("Elapsed CPU: %fms / ", elapsed_cpu);
if (elapsed_gpu == 0)
{
printf("[GPU version not available]\n");
}
else
{
printf("Elapsed GPU: %fms\n", elapsed_gpu);
}
}
/**
* Converts a given 24bpp image into 8bpp grayscale using the CPU.
*/
void cpu_grayscale(int width, int height, float *image, float *image_out)
{
for (int h = 0; h < height; h++)
{
int offset_out = h * width; // 1 color per pixel
int offset = offset_out * 3; // 3 colors per pixel
for (int w = 0; w < width; w++)
{
float *pixel = &image[offset + w * 3];
// Convert to grayscale following the "luminance" model
image_out[offset_out + w] = pixel[0] * 0.0722f + // B
pixel[1] * 0.7152f + // G
pixel[2] * 0.2126f; // R
}
}
}
/**
* Converts a given 24bpp image into 8bpp grayscale using the GPU.
*/
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
//int i = blockDim.x*blockIdx.x + threadIdx.x;
int w = blockDim.x*blockIdx.x + threadIdx.x;
int h = blockDim.y*blockIdx.y + threadIdx.y;
if (w < width && h < height) {
int offset_out = h * width;
int offset = offset_out * 3;
float *pixel = &image[offset + w * 3];
image_out[offset_out + w] = pixel[0] * 0.0722f + pixel[1] * 0.7152f + pixel[2] * 0.2126f;
}
////////////////
// TO-DO #4.2 /////////////////////////////////////////////
// Implement the GPU version of the grayscale conversion //
///////////////////////////////////////////////////////////
}
/**
* Applies a 3x3 convolution matrix to a pixel using the CPU.
*/
float cpu_applyFilter(float *image, int stride, float *matrix, int filter_dim)
{
float pixel = 0.0f;
for (int h = 0; h < filter_dim; h++)
{
int offset = h * stride;
int offset_kernel = h * filter_dim;
for (int w = 0; w < filter_dim; w++)
{
pixel += image[offset + w] * matrix[offset_kernel + w];
}
}
return pixel;
}
/**
* Applies a 3x3 convolution matrix to a pixel using the GPU.
*/
__device__ float gpu_applyFilter(float *image, int stride, float *matrix, int filter_dim)
{
float pixel = 0.0f;
for (int h = 0; h < filter_dim; h++)
{
int offset = h * stride;
int offset_kernel = h * filter_dim;
for (int w = 0; w < filter_dim; w++)
{
pixel += image[offset + w] * matrix[offset_kernel + w];
}
}
return pixel;
////////////////
// TO-DO #5.2 ////////////////////////////////////////////////
// Implement the GPU version of cpu_applyFilter() //
// //
// Does it make sense to have a separate gpu_applyFilter()? //
//////////////////////////////////////////////////////////////
}
/**
* Applies a Gaussian 3x3 filter to a given image using the CPU.
*/
void cpu_gaussian(int width, int height, float *image, float *image_out)
{
float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f,
2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f,
1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f };
for (int h = 0; h < (height - 2); h++)
{
int offset_t = h * width;
int offset = (h + 1) * width;
for (int w = 0; w < (width - 2); w++)
{
image_out[offset + (w + 1)] = cpu_applyFilter(&image[offset_t + w],
width, gaussian, 3);
}
}
}
/**
* Applies a Gaussian 3x3 filter to a given image using the GPU.
*/
__global__ void gpu_gaussian(int width, int height, float *image, float *image_out)
{
__shared__ float sh_block[BLOCK_SIZE_SH * BLOCK_SIZE_SH];
float gaussian[9] = { 1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f,
2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f,
1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f };
//width index
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
//height index
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
//Current pixel being processed
int offset_t = index_y * width + index_x;
int offset = (index_y + 1) * width + (index_x + 1);
int sh_block_offset = threadIdx.y * BLOCK_SIZE_SH + threadIdx.x;
bool inBounds = false;
if (index_x < (width - 2) && index_y < (height - 2))
{
sh_block[sh_block_offset] = image[offset_t];
inBounds = true;
}
__syncthreads();
if (inBounds) {
if (threadIdx.x == blockDim.x - 1) {
//Add right pixels
sh_block[sh_block_offset + 1] = image[offset_t + 1];
sh_block[sh_block_offset + 2] = image[offset_t + 2];
}
if (threadIdx.y == blockDim.y - 1) {
//Add top pixels
sh_block[sh_block_offset + BLOCK_SIZE_SH] = image[offset_t + width];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH] = image[offset_t + 2 * width];
}
if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1) {
sh_block[sh_block_offset + BLOCK_SIZE_SH + 1] = image[offset_t + width + 1];
sh_block[sh_block_offset + BLOCK_SIZE_SH + 2] = image[offset_t + width + 2];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH + 1] = image[offset_t + 2 * width + 1];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH + 2] = image[offset_t + 2 * width + 2];
}
}
__syncthreads();
if (inBounds) {
image_out[offset] = gpu_applyFilter(&sh_block[sh_block_offset],
BLOCK_SIZE_SH, gaussian, 3);
//Old no shared memory
//image_out[offset] = gpu_applyFilter(&image[offset_t],
// width, gaussian, 3);
}
}
/**
* Calculates the gradient of an image using a Sobel filter on the CPU.
*/
void cpu_sobel(int width, int height, float *image, float *image_out)
{
float sobel_x[9] = { 1.0f, 0.0f, -1.0f,
2.0f, 0.0f, -2.0f,
1.0f, 0.0f, -1.0f };
float sobel_y[9] = { 1.0f, 2.0f, 1.0f,
0.0f, 0.0f, 0.0f,
-1.0f, -2.0f, -1.0f };
for (int h = 0; h < (height - 2); h++)
{
int offset_t = h * width;
int offset = (h + 1) * width;
for (int w = 0; w < (width - 2); w++)
{
float gx = cpu_applyFilter(&image[offset_t + w], width, sobel_x, 3);
float gy = cpu_applyFilter(&image[offset_t + w], width, sobel_y, 3);
// Note: The output can be negative or exceed the max. color value
// of 255. We compensate this afterwards while storing the file.
image_out[offset + (w + 1)] = sqrtf(gx * gx + gy * gy);
}
}
}
/**
* Calculates the gradient of an image using a Sobel filter on the GPU.
*/
__global__ void gpu_sobel(int width, int height, float *image, float *image_out)
{
float sobel_x[9] = { 1.0f, 0.0f, -1.0f,
2.0f, 0.0f, -2.0f,
1.0f, 0.0f, -1.0f };
float sobel_y[9] = { 1.0f, 2.0f, 1.0f,
0.0f, 0.0f, 0.0f,
-1.0f, -2.0f, -1.0f };
__shared__ float sh_block[BLOCK_SIZE_SH * BLOCK_SIZE_SH];
int w = blockIdx.x * blockDim.x + threadIdx.x;
int h = blockIdx.y * blockDim.y + threadIdx.y;
int offset_t = h * width + w;
int offset = (h + 1) * width + w + 1;
int sh_block_offset = threadIdx.y * BLOCK_SIZE_SH + threadIdx.x;
bool inBounds = false;
if (w < (width - 2) && h < (height - 2)) {
sh_block[sh_block_offset] = image[offset_t];
inBounds = true;
}
__syncthreads();
if (inBounds) {
if (threadIdx.x == blockDim.x - 1) {
//Add right pixels
sh_block[sh_block_offset + 1] = image[offset_t + 1];
sh_block[sh_block_offset + 2] = image[offset_t + 2];
}
if (threadIdx.y == blockDim.y - 1) {
//Add top pixels
sh_block[sh_block_offset + BLOCK_SIZE_SH] = image[offset_t + width];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH] = image[offset_t + 2 * width];
}
if (threadIdx.x == blockDim.x - 1 && threadIdx.y == blockDim.y - 1) {
sh_block[sh_block_offset + BLOCK_SIZE_SH + 1] = image[offset_t + width + 1];
sh_block[sh_block_offset + BLOCK_SIZE_SH + 2] = image[offset_t + width + 2];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH + 1] = image[offset_t + 2 * width + 1];
sh_block[sh_block_offset + 2 * BLOCK_SIZE_SH + 2] = image[offset_t + 2 * width + 2];
}
}
__syncthreads();
if (inBounds) {
float gx = gpu_applyFilter(&sh_block[sh_block_offset],
BLOCK_SIZE_SH, sobel_x, 3);
float gy = gpu_applyFilter(&sh_block[sh_block_offset],
BLOCK_SIZE_SH, sobel_y, 3);
image_out[offset] = sqrtf(gx * gx + gy * gy);
}
//Old
/*
float gx = gpu_applyFilter(&image[offset_t + w], width, sobel_x, 3);
float gy = gpu_applyFilter(&image[offset_t + w], width, sobel_y, 3);
image_out[offset + (w + 1)] = sqrtf(gx * gx + gy * gy);
*/
}
int main(int argc, char **argv)
{
BMPImage bitmap = { 0 };
float *d_bitmap = { 0 };
float *image_out[2] = { 0 };
float *d_image_out[2] = { 0 };
int image_size = 0;
tval t[2] = { 0 };
double elapsed[2] = { 0 };
dim3 grid(1); // The grid will be defined later
dim3 block(BLOCK_SIZE, BLOCK_SIZE); // The block size will not change
// Make sure the filename is provided
if (argc != 2)
{
fprintf(stderr, "Error: The filename is missing!\n");
return -1;
}
// Read the input image and update the grid dimension
bitmap = readBMP(argv[1]);
image_size = bitmap.width * bitmap.height;
grid = dim3(((bitmap.width + (BLOCK_SIZE - 1)) / BLOCK_SIZE),
((bitmap.height + (BLOCK_SIZE - 1)) / BLOCK_SIZE));
printf("Image opened (width=%d height=%d).\n", bitmap.width, bitmap.height);
// Allocate the intermediate image buffers for each step
for (int i = 0; i < 2; i++)
{
image_out[i] = (float *)calloc(image_size, sizeof(float));
cudaMalloc(&d_image_out[i], image_size * sizeof(float));
cudaMemset(d_image_out[i], 0, image_size * sizeof(float));
}
cudaMalloc(&d_bitmap, image_size * sizeof(float) * 3);
cudaMemcpy(d_bitmap, bitmap.data,
image_size * sizeof(float) * 3, cudaMemcpyHostToDevice);
// Step 1: Convert to grayscale
{
// Launch the CPU version
gettimeofday(&t[0], NULL);
//cpu_grayscale(bitmap.width, bitmap.height, bitmap.data, image_out[0]);
gettimeofday(&t[1], NULL);
elapsed[0] = get_elapsed(t[0], t[1]);
// Launch the GPU version
gettimeofday(&t[0], NULL);
gpu_grayscale << <grid, block >> > (bitmap.width, bitmap.height,
d_bitmap, d_image_out[0]);
cudaMemcpy(image_out[0], d_image_out[0],
image_size * sizeof(float), cudaMemcpyDeviceToHost);
gettimeofday(&t[1], NULL);
elapsed[1] = get_elapsed(t[0], t[1]);
// Store the result image in grayscale
store_result(1, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[0]);
}
// Step 2: Apply a 3x3 Gaussian filter
{
// Launch the CPU version
gettimeofday(&t[0], NULL);
//cpu_gaussian(bitmap.width, bitmap.height, image_out[0], image_out[1]);
gettimeofday(&t[1], NULL);
elapsed[0] = get_elapsed(t[0], t[1]);
// Launch the GPU version
gettimeofday(&t[0], NULL);
gpu_gaussian << <grid, block >> > (bitmap.width, bitmap.height,
d_image_out[0], d_image_out[1]);
cudaMemcpy(image_out[1], d_image_out[1],
image_size * sizeof(float), cudaMemcpyDeviceToHost);
gettimeofday(&t[1], NULL);
elapsed[1] = get_elapsed(t[0], t[1]);
// Store the result image with the Gaussian filter applied
store_result(2, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[1]);
}
// Step 3: Apply a Sobel filter
{
// Launch the CPU version
gettimeofday(&t[0], NULL);
//cpu_sobel(bitmap.width, bitmap.height, image_out[1], image_out[0]);
gettimeofday(&t[1], NULL);
elapsed[0] = get_elapsed(t[0], t[1]);
// Launch the GPU version
gettimeofday(&t[0], NULL);
gpu_sobel << <grid, block >> > (bitmap.width, bitmap.height,
d_image_out[1], d_image_out[0]);
cudaMemcpy(image_out[0], d_image_out[0],
image_size * sizeof(float), cudaMemcpyDeviceToHost);
gettimeofday(&t[1], NULL);
elapsed[1] = get_elapsed(t[0], t[1]);
// Store the final result image with the Sobel filter applied
store_result(3, elapsed[0], elapsed[1], bitmap.width, bitmap.height, image_out[0]);
}
// Release the allocated memory
for (int i = 0; i < 2; i++)
{
free(image_out[i]);
cudaFree(d_image_out[i]);
}
freeBMP(bitmap);
cudaFree(d_bitmap);
return 0;
}
|
9f434a5d2b91ba0c7823b58ab65f7605e7517728.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_transform.h"
__device__ double op(double d1,double d2,double *params) {
return d2 / d1;
}
__device__ double op(double d1,double *params) {
return d1;
}
extern "C"
__global__ void rdiv_strided_double(int n, int xOffset,int yOffset,double *dx, double *dy,int incx,int incy,double *params,double *result) {
transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result);
}
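// Semantics note (assuming transform() in pairwise_transform.h applies
// op(x-element, y-element), which is not shown here): with op(d1, d2) = d2/d1
// this kernel computes a reversed elementwise division,
//   result[i] = dy[i] / dx[i]
// i.e. the operands are swapped relative to a plain div kernel.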
| 9f434a5d2b91ba0c7823b58ab65f7605e7517728.cu | #include "pairwise_transform.h"
__device__ double op(double d1,double d2,double *params) {
return d2 / d1;
}
__device__ double op(double d1,double *params) {
return d1;
}
extern "C"
__global__ void rdiv_strided_double(int n, int xOffset,int yOffset,double *dx, double *dy,int incx,int incy,double *params,double *result) {
transform(n,xOffset,yOffset,dx,dy,incx,incy,params,result);
}
|
5dd0555e61e8cd66519a138ff097d4089ccc314a.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <time.h>
#include <stdio.h>
#include <fstream>
#include <sys/time.h>
using namespace std;
__global__ void foldkernel(bool* a, bool*c, int n, int split){
int threadid = blockIdx.x*blockDim.x+ threadIdx.x;
int i = (blockIdx.x*blockDim.x+ threadIdx.x)*split;
for(int k = 0; k < split; k++){
if(i+k >= n)
break;
for(int j = 0; j < n; j++){
c[threadid*n+j] |= a[(i+k)*n+j];
}
}
}
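// What the kernel above computes (illustrative summary): the n x n boolean
// matrix is "folded" along rows with bitwise OR. Each thread ORs `split`
// consecutive rows of a into its own row of c, and the host then ORs the c
// rows together, so the end result matches the sequential reference
//   mask[j] = a[0][j] | a[1][j] | ... | a[n-1][j]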
void print(bool* a, int m, int n){
for(int i=0;i<m;i++){
for(int j=0;j<n;j++)
cout << a[i*n+j] << '\t';
cout << endl;
}
cout << endl;
}
int main(int argc, char* argv[]){
//Initialisation variables
ifstream in;
in.open(argv[1]); //data_file
int n = atoi(argv[2]); //dimension of bitmat
int split = atoi(argv[3]);
int iterations = atoi(argv[4]); //Iterations to compare the results
//clock variables
clock_t start, end; // elapsed spans below are divided by CLOCKS_PER_SEC, so use clock()
double gpu_time_used,cpu_time_used;
//Threads and block configuration
int rows_res = (n+split-1)/split;
int threadsPerBlock = 512;
long long int numBlocks = (rows_res+threadsPerBlock-1)/threadsPerBlock;
//sizes of matrices
int size_mat = sizeof(bool) * n * n;
int size_res = sizeof(bool) * (rows_res) * n;
int size_mask = sizeof(bool) * n;
//memory allocation in host machine
bool* mat = (bool*)malloc(size_mat);
bool* res = (bool*)malloc(size_res);
bool* mask = (bool*)malloc(size_mask);
//Initializing matrices
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++)
in >> mat[i*n+j];
}
for(int i = 0; i < rows_res; i++)
for(int j = 0; j < n; j++)
res[i*n+j]=false;
for(int i = 0; i < n; i++)
mask[i]=false;
bool *d_mat,*d_res;
start = clock();
//Memory allocation in GPU
hipMalloc((void**)&d_mat, size_mat);
hipMalloc((void**)&d_res, size_res);
//copy data from host to GPU
hipMemcpy(d_mat, mat, size_mat, hipMemcpyHostToDevice);
hipMemcpy(d_res, res, size_res, hipMemcpyHostToDevice);
for(int k = 0; k < iterations; k++)
hipLaunchKernelGGL(( foldkernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_mat, d_res, n, split);
//copying result
hipMemcpy(res, d_res, size_res, hipMemcpyDeviceToHost);
//computing final result
for(int k = 0; k < iterations; k++){
for(int i = 0; i < rows_res; i++){
for(int j = 0; j < n; j++)
mask[j] |= res[i*n+j];
}
}
end = clock();
//calculating time taken by GPU
gpu_time_used = ((double)(end-start))/CLOCKS_PER_SEC;
//CPU computation
start = clock();
for(int k = 0; k < iterations; k++){
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++)
mask[j] |= mat[i*n+j];
}
}
end = clock();
//calculating time taken by CPU
cpu_time_used = ((double)(end-start))/CLOCKS_PER_SEC;
cout << "CPU/GPU: " << cpu_time_used/gpu_time_used << endl;
return 0;
}
| 5dd0555e61e8cd66519a138ff097d4089ccc314a.cu | #include <iostream>
#include <cuda.h>
#include <cstdlib>
#include <time.h>
#include <stdio.h>
#include <fstream>
#include <sys/time.h>
using namespace std;
__global__ void foldkernel(bool* a, bool*c, int n, int split){
int threadid = blockIdx.x*blockDim.x+ threadIdx.x;
int i = (blockIdx.x*blockDim.x+ threadIdx.x)*split;
for(int k = 0; k < split; k++){
if(i+k >= n)
break;
for(int j = 0; j < n; j++){
c[threadid*n+j] |= a[(i+k)*n+j];
}
}
}
void print(bool* a, int m, int n){
for(int i=0;i<m;i++){
for(int j=0;j<n;j++)
cout << a[i*n+j] << '\t';
cout << endl;
}
cout << endl;
}
int main(int argc, char* argv[]){
//Initialisation variables
ifstream in;
in.open(argv[1]); //data_file
int n = atoi(argv[2]); //dimension of bitmat
int split = atoi(argv[3]);
int iterations = atoi(argv[4]); //Iterations to compare the results
//clock variables
clock_t start, end; // elapsed spans below are divided by CLOCKS_PER_SEC, so use clock()
double gpu_time_used,cpu_time_used;
//Threads and block configuration
int rows_res = (n+split-1)/split;
int threadsPerBlock = 512;
long long int numBlocks = (rows_res+threadsPerBlock-1)/threadsPerBlock;
//sizes of matrices
int size_mat = sizeof(bool) * n * n;
int size_res = sizeof(bool) * (rows_res) * n;
int size_mask = sizeof(bool) * n;
//memory allocation in host machine
bool* mat = (bool*)malloc(size_mat);
bool* res = (bool*)malloc(size_res);
bool* mask = (bool*)malloc(size_mask);
//Initializing matrices
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++)
in >> mat[i*n+j];
}
for(int i = 0; i < rows_res; i++)
for(int j = 0; j < n; j++)
res[i*n+j]=false;
for(int i = 0; i < n; i++)
mask[i]=false;
bool *d_mat,*d_res;
start = clock();
//Memory allocation in GPU
cudaMalloc((void**)&d_mat, size_mat);
cudaMalloc((void**)&d_res, size_res);
//copy data from host to GPU
cudaMemcpy(d_mat, mat, size_mat, cudaMemcpyHostToDevice);
cudaMemcpy(d_res, res, size_res, cudaMemcpyHostToDevice);
for(int k = 0; k < iterations; k++)
foldkernel<<<numBlocks, threadsPerBlock>>>(d_mat, d_res, n, split);
//copying result
cudaMemcpy(res, d_res, size_res, cudaMemcpyDeviceToHost);
//computing final result
for(int k = 0; k < iterations; k++){
for(int i = 0; i < rows_res; i++){
for(int j = 0; j < n; j++)
mask[j] |= res[i*n+j];
}
}
end = clock();
//calculating time taken by GPU
gpu_time_used = ((double)(end-start))/CLOCKS_PER_SEC;
//CPU computation
start = clock();
for(int k = 0; k < iterations; k++){
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++)
mask[j] |= mat[i*n+j];
}
}
end = clock();
//calculating time taken by CPU
cpu_time_used = ((double)(end-start))/CLOCKS_PER_SEC;
cout << "CPU/GPU: " << cpu_time_used/gpu_time_used << endl;
return 0;
}
|
a59a343c67364dc42da028057e85b26a5f1599e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "reorder.cuh"
#include "utils.cuh"
#include <thrust/gather.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <ATen/hip/HIPBlas.h>
using namespace torch::indexing;
torch::Tensor gather_int32(const torch::Tensor& map_tensor, const torch::Tensor& input_tensor)
{
int output_size = map_tensor.size(0);
torch::Tensor output_tensor = torch::empty({output_size}, input_tensor.options());
int32_t *map = map_tensor.data_ptr<int32_t>();
int32_t *input = input_tensor.data_ptr<int32_t>();
int32_t *output = output_tensor.data_ptr<int32_t>();
thrust::gather(thrust::device, map, map + output_size, input, output);
return output_tensor;
}
void sort_by_key_int16_int64(const torch::Tensor& keys_tensor, const torch::Tensor& values_tensor)
{
int length = keys_tensor.size(0);
int16_t *keys = keys_tensor.data_ptr<int16_t>();
int64_t *values = values_tensor.data_ptr<int64_t>();
thrust::sort_by_key(thrust::device, keys, keys + length, values);
}
void sort_by_key_int16_int32(const torch::Tensor& keys_tensor, const torch::Tensor& values_tensor)
{
int length = keys_tensor.size(0);
int16_t *keys = keys_tensor.data_ptr<int16_t>();
int32_t *values = values_tensor.data_ptr<int32_t>();
thrust::sort_by_key(thrust::device, keys, keys + length, values);
}
torch::Tensor scatter_int32_float4(const torch::Tensor& map_tensor, const torch::Tensor& input_tensor)
{
int input_size = input_tensor.size(0);
torch::Tensor output_tensor = torch::empty({input_size, 4}, input_tensor.options());
int32_t *map = map_tensor.data_ptr<int32_t>();
float4 *input = (float4*)input_tensor.data_ptr<float>();
float4 *output = (float4*)output_tensor.data_ptr<float>();
thrust::scatter(thrust::device, input, input + input_size, map, output);
return output_tensor;
}
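// Gather vs. scatter as used above (Thrust semantics, noted for clarity):
//   gather : output[i]      = input[map[i]]   for i in [0, map_size)
//   scatter: output[map[i]] = input[i]        for i in [0, input_size)
// Example with map = {2,0,1}: gathering {a,b,c} yields {c,a,b}, while
// scattering {a,b,c} yields {b,c,a}.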
| a59a343c67364dc42da028057e85b26a5f1599e3.cu |
#include "reorder.cuh"
#include "utils.cuh"
#include <thrust/gather.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <ATen/cuda/CUDABlas.h>
using namespace torch::indexing;
torch::Tensor gather_int32(const torch::Tensor& map_tensor, const torch::Tensor& input_tensor)
{
int output_size = map_tensor.size(0);
torch::Tensor output_tensor = torch::empty({output_size}, input_tensor.options());
int32_t *map = map_tensor.data_ptr<int32_t>();
int32_t *input = input_tensor.data_ptr<int32_t>();
int32_t *output = output_tensor.data_ptr<int32_t>();
thrust::gather(thrust::device, map, map + output_size, input, output);
return output_tensor;
}
void sort_by_key_int16_int64(const torch::Tensor& keys_tensor, const torch::Tensor& values_tensor)
{
int length = keys_tensor.size(0);
int16_t *keys = keys_tensor.data_ptr<int16_t>();
int64_t *values = values_tensor.data_ptr<int64_t>();
thrust::sort_by_key(thrust::device, keys, keys + length, values);
}
void sort_by_key_int16_int32(const torch::Tensor& keys_tensor, const torch::Tensor& values_tensor)
{
int length = keys_tensor.size(0);
int16_t *keys = keys_tensor.data_ptr<int16_t>();
int32_t *values = values_tensor.data_ptr<int32_t>();
thrust::sort_by_key(thrust::device, keys, keys + length, values);
}
torch::Tensor scatter_int32_float4(const torch::Tensor& map_tensor, const torch::Tensor& input_tensor)
{
int input_size = input_tensor.size(0);
torch::Tensor output_tensor = torch::empty({input_size, 4}, input_tensor.options());
int32_t *map = map_tensor.data_ptr<int32_t>();
float4 *input = (float4*)input_tensor.data_ptr<float>();
float4 *output = (float4*)output_tensor.data_ptr<float>();
thrust::scatter(thrust::device, input, input + input_size, map, output);
return output_tensor;
}
|
c2c4707fee17312174ba491a869d2af40c246245.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <hip/hip_runtime_api.h>
#include <cstring>
#include <cudf.h>
#include <rmm/rmm.h>
// If this test fails, it means an error code was added without
// adding support to gdf_error_get_name().
TEST(GdfInternalTest, NameEveryError) {
for (int i = 0; i < N_GDF_ERRORS; i++)
{
const char *res = gdf_error_get_name((gdf_error)i);
ASSERT_EQ(0, strstr(res, "Unknown error"));
}
}
| c2c4707fee17312174ba491a869d2af40c246245.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtest/gtest.h"
#include <cuda_runtime_api.h>
#include <cstring>
#include <cudf.h>
#include <rmm/rmm.h>
// If this test fails, it means an error code was added without
// adding support to gdf_error_get_name().
TEST(GdfInternalTest, NameEveryError) {
for (int i = 0; i < N_GDF_ERRORS; i++)
{
const char *res = gdf_error_get_name((gdf_error)i);
ASSERT_EQ(0, strstr(res, "Unknown error"));
}
}
|
27f1a4fde0db436cae68cf8b1d731e13f7ed2011.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "hip/hip_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void computeHPWL(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
T* partial_hpwl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
T max_x = -FLT_MAX;
T min_x = FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
min_x = min(min_x, x[flat_netpin[j]]);
max_x = max(max_x, x[flat_netpin[j]]);
}
partial_hpwl[i] = max_x-min_x;
}
else
{
partial_hpwl[i] = 0;
}
}
}
template <typename T>
int computeHPWLCudaLauncher(
const T* x, const T* y,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
T* partial_hpwl
)
{
const int thread_count = 512;
const int block_count_nets = (num_nets + thread_count - 1) / thread_count;
hipError_t status;
hipStream_t stream_y;
status = hipStreamCreate(&stream_y);
if (status != hipSuccess)
{
printf("hipStreamCreate failed for stream_y\n");
fflush(stdout);
return 1;
}
hipLaunchKernelGGL(( computeHPWL), dim3(block_count_nets), dim3(thread_count), 0, 0,
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
partial_hpwl
);
hipLaunchKernelGGL(( computeHPWL), dim3(block_count_nets), dim3(thread_count), 0, stream_y,
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
partial_hpwl+num_nets
);
/* destroy stream */
status = hipStreamDestroy(stream_y);
if (status != hipSuccess)
{
printf("stream_y destroy failed\n");
fflush(stdout);
return 1;
}
//printArray(partial_hpwl, num_nets, "partial_hpwl");
// The summation is moved out to ATen;
// a significant speedup is observed.
//sumArray<<<1, 1>>>(partial_hpwl, num_nets, hpwl);
return 0;
}
// manually instantiate the template function
#define REGISTER_KERNEL_LAUNCHER(type) \
template int computeHPWLCudaLauncher<type>(\
const type* x, const type* y, \
const int* flat_netpin, \
const int* netpin_start, \
const unsigned char* net_mask, \
int num_nets, \
type* partial_hpwl \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
| 27f1a4fde0db436cae68cf8b1d731e13f7ed2011.cu | #include <stdio.h>
#include <math.h>
#include <float.h>
#include "cuda_runtime.h"
#include "utility/src/utils.cuh"
DREAMPLACE_BEGIN_NAMESPACE
template <typename T>
__global__ void computeHPWL(
const T* x,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
T* partial_hpwl
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < num_nets)
{
T max_x = -FLT_MAX;
T min_x = FLT_MAX;
if (net_mask[i])
{
for (int j = netpin_start[i]; j < netpin_start[i+1]; ++j)
{
min_x = min(min_x, x[flat_netpin[j]]);
max_x = max(max_x, x[flat_netpin[j]]);
}
partial_hpwl[i] = max_x-min_x;
}
else
{
partial_hpwl[i] = 0;
}
}
}
template <typename T>
int computeHPWLCudaLauncher(
const T* x, const T* y,
const int* flat_netpin,
const int* netpin_start,
const unsigned char* net_mask,
int num_nets,
T* partial_hpwl
)
{
const int thread_count = 512;
const int block_count_nets = (num_nets + thread_count - 1) / thread_count;
cudaError_t status;
cudaStream_t stream_y;
status = cudaStreamCreate(&stream_y);
if (status != cudaSuccess)
{
printf("cudaStreamCreate failed for stream_y\n");
fflush(stdout);
return 1;
}
computeHPWL<<<block_count_nets, thread_count>>>(
x,
flat_netpin,
netpin_start,
net_mask,
num_nets,
partial_hpwl
);
computeHPWL<<<block_count_nets, thread_count, 0, stream_y>>>(
y,
flat_netpin,
netpin_start,
net_mask,
num_nets,
partial_hpwl+num_nets
);
/* destroy stream */
status = cudaStreamDestroy(stream_y);
if (status != cudaSuccess)
{
printf("stream_y destroy failed\n");
fflush(stdout);
return 1;
}
//printArray(partial_hpwl, num_nets, "partial_hpwl");
// The summation is moved out to ATen;
// a significant speedup is observed.
//sumArray<<<1, 1>>>(partial_hpwl, num_nets, hpwl);
return 0;
}
// manually instantiate the template function
#define REGISTER_KERNEL_LAUNCHER(type) \
template int computeHPWLCudaLauncher<type>(\
const type* x, const type* y, \
const int* flat_netpin, \
const int* netpin_start, \
const unsigned char* net_mask, \
int num_nets, \
type* partial_hpwl \
);
REGISTER_KERNEL_LAUNCHER(float);
REGISTER_KERNEL_LAUNCHER(double);
DREAMPLACE_END_NAMESPACE
|
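For reference, the two launches above compute the per-net x-span (default stream) and y-span (stream_y); the half-perimeter wirelength that the later ATen summation produces is

$$\mathrm{HPWL} = \sum_{e \in \text{nets}} \left[ \left( \max_{p \in P_e} x_p - \min_{p \in P_e} x_p \right) + \left( \max_{p \in P_e} y_p - \min_{p \in P_e} y_p \right) \right]$$

where $P_e$ is the set of pins of net $e$ (masked nets contribute 0).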
be7009f5f9de7ef221854dbc4188d1dc5eecbce1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include <fstream>
#include<stdio.h>
#include<cstdlib>
#include <sstream>
#include<string>
using namespace std;
#define width 4950
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//device function
__device__ int calculating(int *d_flows,int *d_dist,int *d_sol,int nsize,int tidx)
{
int calcost =0;
for(int i=0;i<nsize-1;i++)
{
for(int j=i+1;j<nsize;j++)
{
calcost = calcost + ( d_flows[ ( d_sol[(tidx* nsize)+i]-1) *nsize + (d_sol[(tidx* nsize)+j]-1)]) * d_dist[i*nsize +j];
}
}
for(int k=1;k<nsize;k++)
{
for(int l=0;l<k;l++)
{
calcost = calcost + d_flows[(d_sol[(tidx* nsize)+k]-1) *nsize + (d_sol[(tidx* nsize)+l]-1)] * d_dist[k *nsize + l];
}
}
return calcost;
}
__device__ int calculate(int *d_flows,int *d_dist,int *d_sol,int nsize,int tidx,int i,int j)
{
int ccost=0,gcost=0,hcost=0;
for(int k=0;k<nsize;k++)
{
if(k!=i && k!=j)
{
gcost = (d_dist[j*nsize+k] - d_dist[i*nsize+k]) *(d_flows[(d_sol[(tidx * nsize)+i]-1) * nsize + (d_sol[(tidx * nsize)+k] - 1)] - d_flows[(d_sol[(tidx * nsize)+j]-1) * nsize + (d_sol[(tidx * nsize)+k] - 1)]);
hcost = (d_dist[k*nsize+j] - d_dist[k*nsize+i]) *(d_flows[(d_sol[(tidx * nsize)+k]-1) * nsize + (d_sol[(tidx * nsize)+i] - 1)] - d_flows[(d_sol[(tidx * nsize)+k]-1) * nsize + (d_sol[(tidx * nsize)+j] - 1)]);
ccost = ccost + (gcost + hcost);
}
}
return ccost;
}
__device__ void copy(int *d_sol,int *d_newarray,int nsize,int tidx)
{
for(int j=0;j<nsize;j++)
{
d_newarray[tidx * nsize + j] = d_sol[tidx * nsize + j];
}
}
__device__ void copy1(int *d_tmpsol,int *d_sol,int nsize,int row,int tidx)
{
for(int j=0;j<nsize;j++)
{
d_tmpsol[j] = d_sol[tidx * nsize + j];
}
}
__device__ void copy2(int *d_tmpsol,int *d_newt,int nsize)
{
for(int j=0;j<nsize;j++)
{
//d_newarray[((tidx *(nsize-1))+tx)* nsize + j] = d_tmpsol[j];
d_newt[j] = d_tmpsol[j];
}
}
__device__ void swap(int *a,int *b)
{
int temp=0;
temp = *a;
*a = *b;
*b = temp;
}
__device__ void least(int *d_newarray,int nsize,int row,int tidx,int *d_divresult,int *d_pos)
{
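// Scans the nsize*(nsize-1)/2 swap costs recorded for solution `tidx` in
// d_divresult, picks the cheapest one, and applies that swap to d_newarray
// using the location pair stored in d_pos (a best-improvement move).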
int temp=0,lrow=0,r;
r = (nsize * (nsize-1))/2;
temp = d_divresult[tidx*r];
for(int i=1;i<r;i++)
{
if(d_divresult[tidx*r+i]<temp)
{
temp = d_divresult[tidx*r+i];
lrow=i;
}
}
//printf("%d least position in array",lrow);
swap(&d_newarray[tidx*nsize+d_pos[lrow*2]],&d_newarray[tidx*nsize+d_pos[lrow*2+1]]);
}
__global__ void child_kernel(int tidx,int nsize,int *d_dist,int *d_flows,int *d_sol,int *d_bestcostsofar,int *d_bestsofar,int *d_result,int row,int *d_pos,int *d_newarray,int *d_divresult,int *d_frequency)
{
int ipos=0,jpos=0,ir=0,ik=0;
int tx = threadIdx.x;
int xj = ((nsize *(nsize-1))/2)/(nsize-1);
int d_tmpsol[100];
__shared__ int pos[width * 2];
if(tx<(nsize-1))
{ ik =(nsize-2)*tx;
for(int j=0;j<xj;j++)
{
pos[(tx*2)+ik] = d_pos[(tx*2)+ik];
pos[(tx*2)+(ik+1)] = d_pos[(tx*2)+(ik+1)];
ik = ik + 2;
}
}
__syncthreads();
copy1(d_tmpsol,d_sol,nsize,row,tidx);
if(tx<nsize-1)
{
ir = (nsize-2)*tx;
for(int j=0;j<xj;j++)
{
ipos=0;
jpos=0;
copy1(d_tmpsol,d_sol,nsize,row,tidx);
//swap(&d_tmpsol[pos[(tx*2)+ir]],&d_tmpsol[pos[(tx*2)+(ir+1)]]);
ipos = pos[(tx*2)+ir];
jpos = pos[(tx*2)+(ir+1)];
//printf("parent id%d\t child id %d\t iposition and jpos to swap %d %d \n",tidx,tx,ipos,jpos);
int dcost=0,ecost=0,fcost=0;
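// Incremental (delta) evaluation: rather than recomputing the full objective
// after swapping locations ipos and jpos, only the change is computed -- the
// swapped pair's own terms (dcost + ecost) plus the interaction terms with
// every other location k (calculate() above) -- so tcost = d_result[tidx] + delta.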
dcost = (d_dist[jpos*nsize+ipos] - d_dist[ipos*nsize+jpos])*(d_flows[(d_sol[(tidx * nsize)+ipos]-1) * nsize + (d_sol[(tidx * nsize)+jpos] - 1)] - d_flows[(d_sol[(tidx * nsize)+jpos]-1) * nsize + (d_sol[(tidx * nsize)+ipos] - 1)]);
ecost = (d_dist[jpos*nsize+jpos] - d_dist[ipos*nsize+ipos])*(d_flows[(d_sol[(tidx * nsize)+ipos]-1) * nsize + (d_sol[(tidx * nsize)+ipos] - 1)] - d_flows[(d_sol[(tidx * nsize)+jpos]-1) * nsize + (d_sol[(tidx * nsize)+jpos] - 1)]);
fcost = dcost + ecost;
int totcost=0,delta=0,tcost=0;
totcost = calculate(d_flows,d_dist,d_sol,nsize,tidx,ipos,jpos);
//__syncthreads();
//tcost=calculating(d_flows,d_dist,d_newarray,nsize,tidx);
delta = fcost + totcost;
tcost = d_result[tidx] + delta;
d_divresult[tidx*(nsize*(nsize-1)/2)+(tx*xj+j)]=tcost;
if(tcost<d_bestcostsofar[tidx])
{
d_bestcostsofar[tidx]=tcost;
swap(&d_tmpsol[ipos],&d_tmpsol[jpos]);
for(int j=0;j<nsize;j++)
{
d_bestsofar[tidx * nsize + j] = d_tmpsol[j];
//d_newarray[tidx*nsize+j] = d_tmpsol[j];
}
}
ir = ir +2;
}//end of j
}//end of k
//least(d_newarray,nsize,row,tidx,d_divresult,d_pos);
// d_tmpsol is a fixed-size local array, so no free() is needed
}
__device__ void diversification(int *d_sol,int *d_newarray,int nsize,int tidx,int l)
{
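// Diversification (perturbation): rebuilds the permutation by reading
// d_newarray back with stride `offset`, scattering previously adjacent
// facilities apart so the search can escape the current local optimum.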
/*if(l>nsize/2)
{
l = l/4*10;
}*/
int offset=9;
int pos,istart;
//for(int i=0;i<row;i++)
//{
pos=0;
istart =0;
for(int start=offset;start>=0;start--)
{
istart = start;
while(istart<nsize)
{
d_sol[tidx*nsize+pos] = d_newarray[tidx*nsize+istart];
pos=pos+1;
if(istart!=0)
istart = istart + offset;
else
break;
}
}
//}
}
__global__ void max(int *d_dist,int *d_flows,int *d_sol,int nsize,int row,int *d_result,int *d_bestsofar,int *d_bestcostsofar,int *d_pos,int *d_newarray,int *d_divresult,int *d_frequency)
{ int totalcost =0,divcost=0;//lrow=0;
int tidx = threadIdx.x+blockDim.x * blockIdx.x;
if(tidx < row)
{
totalcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = totalcost;
d_bestcostsofar[tidx]=d_result[tidx];
copy(d_sol,d_bestsofar,nsize,tidx);
copy(d_sol,d_newarray,nsize,tidx);
}
__syncthreads();
/*for(int l=0;l<nsize;l++)
{
if(tidx<row)
{
int threadsPerBlock= (nsize * (nsize - 1))/nsize;
//printf("child kernel number of threads %d:",threadsPerBlock);
hipLaunchKernelGGL(( child_kernel), dim3(1),dim3(threadsPerBlock), 0, 0, tidx,nsize,d_dist,d_flows,d_sol,d_bestcostsofar,d_bestsofar,d_result,row,d_pos,d_newarray,d_divresult,d_frequency);
if (hipSuccess != hipGetLastError())
{
return;
}
//hipDeviceSynchronize();
//hipError_t err = hipGetLastError();
//if (err != hipSuccess) printf("!");
// wait for child to complete
if (hipSuccess != hipDeviceSynchronize()) {
return;
}
if(l<nsize/2)
{
for(int jc=0;jc<nsize;jc++)
{
d_sol[tidx*nsize+jc]= d_bestsofar[tidx*nsize+jc];
}
d_result[tidx] = d_bestcostsofar[tidx];
}
else
{
diversification(d_sol,d_bestsofar,nsize,tidx,l);
divcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = divcost;
}
}
}*/
/*__syncthreads();
if(tidx<row)
{
diversification(d_newarray,d_bestsofar,nsize,tidx);
divcost=calculating(d_flows,d_dist,d_newarray,nsize,tidx);
d_result[tidx] = divcost;
}
__syncthreads();
*/
for(int l=0;l<nsize*10;l++)
{
if(tidx<row)
{
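// Dynamic parallelism: each parent thread launches its own child grid of
// nsize-1 threads to evaluate all pairwise swaps of solution `tidx`
// concurrently, then waits on hipDeviceSynchronize() before using the result.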
int threadsPerBlock= (nsize * (nsize - 1))/nsize;
//printf("child kernel number of threads %d:",threadsPerBlock);
hipLaunchKernelGGL(( child_kernel), dim3(1),dim3(threadsPerBlock), 0, 0, tidx,nsize,d_dist,d_flows,d_sol,d_bestcostsofar,d_bestsofar,d_result,row,d_pos,d_newarray,d_divresult,d_frequency);
if (hipSuccess != hipGetLastError())
{
return;
}
//hipDeviceSynchronize();
//hipError_t err = hipGetLastError();
//if (err != hipSuccess) printf("!");
// wait for child to complete
if (hipSuccess != hipDeviceSynchronize()) {
return;
}
if(l%5==0)
//if(l<(nsize/2+(nsize/4)))
{
if(tidx>row/2)
{
for(int jc=0;jc<nsize;jc++)
{
d_sol[tidx*nsize+jc]= d_bestsofar[tidx*nsize+jc];
}
d_result[tidx] = d_bestcostsofar[tidx];
}
else
{
diversification(d_sol,d_newarray,nsize,tidx,l);
divcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = divcost;
}
}
else
{
copy(d_sol,d_newarray,nsize,tidx);
least(d_newarray,nsize,row,tidx,d_divresult,d_pos);
/*if(l%2==0)
{
diversification(d_sol,d_newarray,nsize,tidx,l);
divcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = divcost;
}*/
//else
//{
for(int jc=0;jc<nsize;jc++)
{
d_sol[tidx*nsize+jc]= d_newarray[tidx*nsize+jc];
}
divcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = divcost;
//}
}
}
}
/*
temp = d_bestcostsofar[0];
//for(int i=1;i<row;i++)
if(tidx<row)
{
if(d_bestcostsofar[tidx]<temp)
{
temp = d_bestcostsofar[tidx];
lrow=tidx;
}
__syncthreads();
}
for(int j=0;j<nsize;j++)
{
printf("%d \t",d_bestsofar[lrow*nsize+j]);
}
printf("\n");
printf("best cost %d:",temp);
//printf("\n");
*/
}
int main(int argc,char *argv[])
{
int arraySizeX,arraySizeY,size,num,seed,a=0,b=0;
clock_t cpu_startTime, cpu_endTime;
double cpu_ElapseTime=0;
cpu_startTime = clock();
ifstream input;
hipEvent_t start , stop;
float ctime;
hipEventCreate(&start);
hipEventCreate(&stop);
cout<<"input file name:"<<argv[1]<<endl;
int iseed = atoi(argv[2]);
input.open(argv[1],ios::in);
//input.open("tai25a.txt",ios::in);
if(!input.is_open())
{
cout<<"error opening file";
}
//reading the size,seed from the file
input>>size>>seed;
cout<<"array size:"<<size<<endl;
cout<<"seed value:"<<argv[2]<<endl;
//copying the size into nsize,seed into iseed variable
int nsize;
nsize=size;
arraySizeX=2*nsize;
arraySizeY=nsize;
//declaring array to copy the matrix from file into array
int** array;
array = (int**) malloc(arraySizeX*sizeof(int*));
for (int i = 0; i < arraySizeX; i++)
array[i] = (int*) malloc(arraySizeY*sizeof(int));
for(int row=0;row<(arraySizeX);row++)
{
for(int col=0;col<arraySizeY;col++){
array[row][col]=0;
}//end col for
}//end row for
//flatten array dist nd flows declarations
int size_A = nsize * nsize;
int mem_size_A = sizeof(int) * size_A;
int *h_dist = (int *)malloc(mem_size_A);
int *h_flows = (int *)malloc(mem_size_A);
int *h_a = (int *)malloc(mem_size_A);
for(int i=0;i<nsize;i++)
{
for(int j=0;j<nsize;j++)
{
h_dist[i *nsize +j] = 0;
h_flows[i* nsize +j] = 0;
}
}
while(!input.eof())
{
input>>num;
if(b==nsize)
{
a++;
b=0;
}
//a->row,b->col
if(a!=(nsize*2) && b!=nsize)
{
array[a][b]=num;
b++;
}//end if
}// end-while
input.close();
for(int row=0;row<nsize;row++)
{
for(int col=0;col<nsize;col++)
{
h_flows[row *nsize + col]=array[row][col];
}
}
//storing in dist_dup array
int irow=0;
for(int row=nsize;row<nsize*2;row++)
{
int icol=0;
for(int col=0;col<nsize;col++)
{
h_dist[irow *nsize + icol]=array[row][col];
icol++;
}
irow++;
}
cout<<"flatten distance:";
for(int i=0;i<nsize;i++)
{
for(int j=0;j<nsize;j++)
{
cout<<h_dist[i * nsize+ j]<<" ";
}
cout<<endl;
}
cout<<endl;
cout<<"flatten flows:";
for(int i=0;i<nsize;i++)
{
for(int j=0;j<nsize;j++)
{
cout<<h_flows[i * nsize+j]<<" ";
}
cout<<endl;
}
cout<<endl;
cout<<"size of the array:"<<nsize;
cout<<endl;
//srand(time(NULL));
srand(iseed);
int *init_sol,j,row=6144;
init_sol = (int *)malloc(nsize * sizeof(int));
for(int i=0;i<nsize;i++)
init_sol[i] = i+1;
int size_B = (row) * nsize;
int mem_size_B = sizeof(int) * size_B;
int *h_sol = (int *)malloc(mem_size_B);
for(int k=0;k<row;k++)
{
for(int i=nsize-1;i>0;i--)
{
j= rand() % (i+1);
int temp = init_sol[i];
init_sol[i] = init_sol[j];
init_sol[j]=temp;
}
for(int l=0;l<nsize;l++)
{
h_sol[k *nsize + l] = init_sol[l];
}
}
/*cout<<"initial solution array cpu:";
for(int i=0;i<row;i++)
{
for(int j=0;j<nsize;j++)
cout<<h_sol[i *nsize + j]<<" ";
cout<<endl;
}
*/
int size_B1 = (nsize*(nsize-1))/2 * 2;
int mem_size_B1 = sizeof(int) * size_B1;
int *h_pos = (int *)malloc(mem_size_B1);
int l=0;
for(int i=0;i<nsize-1;i++)
{
int q =l;
for(int j=i+1;j<nsize;j++)
{
int ipos=i;
int jpos=j;
h_pos[q*2+0] = ipos;
h_pos[q*2+1] = jpos;
q++;
}
l=q;
}
/*cout<<"swapping locations of the array one:";
for(int i=0;i<(nsize*(nsize-1))/2;i++)
{
for(int j=0;j<2;j++)
{
cout<<h_pos[i*2+j];
}
cout<<endl;
}
cout<<endl;
int xj = ((nsize*(nsize-1))/2)/(nsize-1);
int x=0;
cout<<"swapping locations of the array two:";
for(int i=0;i<(nsize-1);i++)
{int k=x;
for(int j=0;j<xj;j++)
{
cout<<h_pos[(i*2)+k];
cout<<h_pos[(i*2)+(k+1)];
cout<<endl;
k = k+2;
}
cout<<endl;
x=k-2;
cout<<"x value:"<<x;
cout<<endl;
}
*/
int *h_newarray;
h_newarray = (int *)malloc(row * nsize * sizeof(int));
/*cout<<"diversified array:";
for(int i=0;i<(row);i++)
{
for(j=0;j<nsize;j++)
{
cout<<h_newarray[i*nsize+j]<<" ";
}
cout<<endl;
}
*/
int *h_result,*h_frequency;
int *d_result,*d_frequency;
int *h_divresult;
int *d_divresult;
int *h_bestsofar,*h_bestcostsofar;
h_result = (int *)malloc(row * sizeof(int));
h_frequency = (int *)malloc(row * sizeof(int));
h_divresult = (int *)malloc((row*(nsize*(nsize-1))/2) * sizeof(int));
h_bestsofar = (int *)malloc(row * nsize * sizeof(int));
h_bestcostsofar = (int *)malloc((row) * sizeof(int));
for(int i=0;i<row;i++)
h_frequency[i] = (nsize * (nsize-1))/2;
hipEventRecord(start,0);
int *d_bestsofar=NULL,*d_bestcostsofar=NULL,*d_newarray=NULL;
gpuErrchk( hipMalloc((void **)&d_bestcostsofar,row * sizeof(int)) );
gpuErrchk( hipMalloc((void **)&d_bestsofar,row * nsize * sizeof(int)) );
gpuErrchk( hipMalloc((void **)&d_result,row * sizeof(int)) );
gpuErrchk( hipMalloc((void **)&d_frequency,row * sizeof(int)) );
gpuErrchk( hipMalloc((void **)&d_newarray,(row) * nsize * sizeof(int)));
gpuErrchk( hipMalloc((void **)&d_divresult,(row*(nsize*(nsize-1))/2) * sizeof(int)) );
// declaring device array and allocating memory on gpu
int *d_dist = NULL,*d_flows = NULL ,*d_sol = NULL,*d_pos=NULL;
gpuErrchk( hipMalloc((void **)&d_dist,nsize*nsize*sizeof(int)) );
gpuErrchk( hipMalloc((void **)&d_flows,nsize*nsize*sizeof(int)) );
gpuErrchk( hipMalloc((void **)&d_pos,(nsize*(nsize-1))/2 * 2 * sizeof(int)));
gpuErrchk( hipMalloc((void **)&d_sol,row*nsize*sizeof(int)) );
//copying arrays from host to device
gpuErrchk( hipMemcpy(d_dist,h_dist,nsize*nsize*sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_flows,h_flows,nsize*nsize*sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_sol,h_sol,row*nsize*sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_pos,h_pos,(nsize*(nsize-1))/2 * 2 * sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_bestcostsofar,h_bestcostsofar,row * sizeof(int),hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_bestsofar,h_bestsofar,row * nsize* sizeof(int), hipMemcpyHostToDevice));
gpuErrchk( hipMemcpy(d_newarray,h_newarray, row* nsize* sizeof(int), hipMemcpyHostToDevice));
gpuErrchk( hipMemcpy(d_result,h_result,row * sizeof(int), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_frequency,h_frequency,row * sizeof(int), hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_divresult,h_divresult,(row*(nsize*(nsize-1))/2) * sizeof(int), hipMemcpyHostToDevice) );
//cuda kernel call
int threadsPerBlock=256;
int blockPerGrid = (row + threadsPerBlock - 1) / threadsPerBlock;
//int smSize= threadsPerBlock *nsize*nsize*sizeof(int);
cout<<"number of initial solutions:"<<row<<endl;
cout<<"number of blocks:"<<blockPerGrid<<" "<<endl;
cout<<"number of threads:"<<threadsPerBlock<<" "<<endl;hipLaunchKernelGGL((
max), dim3(blockPerGrid),dim3(threadsPerBlock), 0, 0, d_dist,d_flows,d_sol,nsize,row,d_result,d_bestsofar,d_bestcostsofar,d_pos,d_newarray,d_divresult,d_frequency);
gpuErrchk( hipPeekAtLastError() );
if (hipSuccess != hipGetLastError()) {
return 1;
}
// wait for parent to complete
if (hipSuccess != hipDeviceSynchronize()) {
return 2;
}
gpuErrchk( hipMemcpy(h_result,d_result,row * sizeof(int),hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(h_divresult,d_divresult,(row*(nsize*(nsize-1))/2) * sizeof(int),hipMemcpyDeviceToHost) );
//gpuErrchk( hipMemcpy(h_newarray, d_newarray , (row) * nsize* sizeof(int),hipMemcpyDeviceToHost ));
gpuErrchk( hipMemcpy(h_bestcostsofar,d_bestcostsofar,row * sizeof(int),hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy(h_bestsofar, d_bestsofar , row * nsize* sizeof(int),hipMemcpyDeviceToHost ));
//gpuErrchk( hipMemcpy(temp_sol, d_tmpsol , (row*(nsize-1)) * nsize* sizeof(int),hipMemcpyDeviceToHost ));
gpuErrchk( hipMemcpy(h_sol,d_sol , (row) * nsize* sizeof(int),hipMemcpyDeviceToHost ));
hipEventRecord(stop,0);
hipEventSynchronize(start);
hipEventSynchronize(stop);
/*
cout<<"last sols and their cost:";
for(int i=0;i<row;i++)
{
for(int j=0;j<nsize;j++)
cout<<h_sol[i *nsize + j]<<" ";
cout<<h_result[i]<<" ";
cout<<endl;
}
cout<<"cost of best solutions sofar and best solutin array's:"<<endl;
for(int i=0;i<row;i++)
{
for(j=0;j<nsize;j++)
{
cout<<h_bestsofar[i*nsize+j]<<" ";
}
cout<<h_bestcostsofar[i]<<" ";
cout<<endl;
}
cout<<"cost of pair wise swaps"<<endl;
for(int i=0;i<(row*(nsize*(nsize-1))/2);i++)
cout<<h_divresult[i]<<" ";
cout<<endl;
*/
/*
int offset=5,pos,istart;
for(int i=0;i<row;i++)
{
pos=0;
istart =0;
for(int start=offset;start>=0;start--)
{
istart = start;
while(istart<nsize)
{
h_newarray[i*nsize+pos] = h_bestsofar[i*nsize+istart];
pos=pos+1;
if(istart!=0)
istart = istart + offset;
else
break;
}
}
}
cout<<"cpu newarray sol:";
cout<<endl;
for(int i=0;i<(row);i++)
{
for(j=0;j<nsize;j++)
{
cout<<h_newarray[i*nsize+j]<<" ";
}
cout<<endl;
}
cout<<"cost of gpu diversified initial sols:";
for(int i=0;i<row;i++)
{
for(j=0;j<nsize;j++)
{
cout<<h_newarray[i*nsize+j]<<" ";
}
cout<<h_result[i]<<" ";
cout<<endl;
}
cout<<endl;
*/
cout<<"best solutionsofar array:"<<endl;
int temp=0,lrow=0;
temp = h_bestcostsofar[0];
for(int i=1;i<row;i++)
{
if(h_bestcostsofar[i]<temp)
{
temp = h_bestcostsofar[i];
lrow=i;
}
}
for(int j=0;j<nsize;j++)
{
cout<<h_bestsofar[lrow*nsize+j]<<" ";
}
cout<<endl;
cout<<"best cost:"<<temp<<endl;
//*/
cpu_endTime = clock();
cpu_ElapseTime= ((cpu_endTime - cpu_startTime) /(double) CLOCKS_PER_SEC);
cout<<"total execution time in seconds:"<<cpu_ElapseTime<<endl;
hipEventElapsedTime(&ctime, start , stop);
cout<<"time for the kernel in milliseconds:"<<ctime<<endl;
hipEventDestroy(start);
hipEventDestroy(stop);
cout<<endl;
free(array);
free(h_dist);
free(h_flows);
free(init_sol);
free(h_sol);
free(h_pos);
free(h_result);
free(h_bestsofar);
free(h_bestcostsofar);
hipFree(d_dist);
hipFree(d_flows);
hipFree(d_sol);
hipFree(d_bestcostsofar);
hipFree(d_result);
hipFree(d_bestsofar);
hipFree(d_pos);
return 0;
}
| be7009f5f9de7ef221854dbc4188d1dc5eecbce1.cu | #include<iostream>
#include <fstream>
#include<stdio.h>
#include<cstdlib>
#include <sstream>
#include<string>
using namespace std;
#define width 4950
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//device function
__device__ int calculating(int *d_flows,int *d_dist,int *d_sol,int nsize,int tidx)
{
int calcost =0;
for(int i=0;i<nsize-1;i++)
{
for(int j=i+1;j<nsize;j++)
{
calcost = calcost + ( d_flows[ ( d_sol[(tidx* nsize)+i]-1) *nsize + (d_sol[(tidx* nsize)+j]-1)]) * d_dist[i*nsize +j];
}
}
for(int k=1;k<nsize;k++)
{
for(int l=0;l<k;l++)
{
calcost = calcost + d_flows[(d_sol[(tidx* nsize)+k]-1) *nsize + (d_sol[(tidx* nsize)+l]-1)] * d_dist[k *nsize + l];
}
}
return calcost;
}
__device__ int calculate(int *d_flows,int *d_dist,int *d_sol,int nsize,int tidx,int i,int j)
{
int ccost=0,gcost=0,hcost=0;
for(int k=0;k<nsize;k++)
{
if(k!=i && k!=j)
{
gcost = (d_dist[j*nsize+k] - d_dist[i*nsize+k]) *(d_flows[(d_sol[(tidx * nsize)+i]-1) * nsize + (d_sol[(tidx * nsize)+k] - 1)] - d_flows[(d_sol[(tidx * nsize)+j]-1) * nsize + (d_sol[(tidx * nsize)+k] - 1)]);
hcost = (d_dist[k*nsize+j] - d_dist[k*nsize+i]) *(d_flows[(d_sol[(tidx * nsize)+k]-1) * nsize + (d_sol[(tidx * nsize)+i] - 1)] - d_flows[(d_sol[(tidx * nsize)+k]-1) * nsize + (d_sol[(tidx * nsize)+j] - 1)]);
ccost = ccost + (gcost + hcost);
}
}
return ccost;
}
__device__ void copy(int *d_sol,int *d_newarray,int nsize,int tidx)
{
for(int j=0;j<nsize;j++)
{
d_newarray[tidx * nsize + j] = d_sol[tidx * nsize + j];
}
}
__device__ void copy1(int *d_tmpsol,int *d_sol,int nsize,int row,int tidx)
{
for(int j=0;j<nsize;j++)
{
d_tmpsol[j] = d_sol[tidx * nsize + j];
}
}
__device__ void copy2(int *d_tmpsol,int *d_newt,int nsize)
{
for(int j=0;j<nsize;j++)
{
//d_newarray[((tidx *(nsize-1))+tx)* nsize + j] = d_tmpsol[j];
d_newt[j] = d_tmpsol[j];
}
}
__device__ void swap(int *a,int *b)
{
int temp=0;
temp = *a;
*a = *b;
*b = temp;
}
__device__ void least(int *d_newarray,int nsize,int row,int tidx,int *d_divresult,int *d_pos)
{
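// Scans the nsize*(nsize-1)/2 swap costs recorded for solution `tidx` in
// d_divresult, picks the cheapest one, and applies that swap to d_newarray
// using the location pair stored in d_pos (a best-improvement move).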
int temp=0,lrow=0,r;
r = (nsize * (nsize-1))/2;
temp = d_divresult[tidx*r];
for(int i=1;i<r;i++)
{
if(d_divresult[tidx*r+i]<temp)
{
temp = d_divresult[tidx*r+i];
lrow=i;
}
}
//printf("%d least position in array",lrow);
swap(&d_newarray[tidx*nsize+d_pos[lrow*2]],&d_newarray[tidx*nsize+d_pos[lrow*2+1]]);
}
__global__ void child_kernel(int tidx,int nsize,int *d_dist,int *d_flows,int *d_sol,int *d_bestcostsofar,int *d_bestsofar,int *d_result,int row,int *d_pos,int *d_newarray,int *d_divresult,int *d_frequency)
{
int ipos=0,jpos=0,ir=0,ik=0;
int tx = threadIdx.x;
int xj = ((nsize *(nsize-1))/2)/(nsize-1);
int d_tmpsol[100];
__shared__ int pos[width * 2];
if(tx<(nsize-1))
{ ik =(nsize-2)*tx;
for(int j=0;j<xj;j++)
{
pos[(tx*2)+ik] = d_pos[(tx*2)+ik];
pos[(tx*2)+(ik+1)] = d_pos[(tx*2)+(ik+1)];
ik = ik + 2;
}
}
__syncthreads();
copy1(d_tmpsol,d_sol,nsize,row,tidx);
if(tx<nsize-1)
{
ir = (nsize-2)*tx;
for(int j=0;j<xj;j++)
{
ipos=0;
jpos=0;
copy1(d_tmpsol,d_sol,nsize,row,tidx);
//swap(&d_tmpsol[pos[(tx*2)+ir]],&d_tmpsol[pos[(tx*2)+(ir+1)]]);
ipos = pos[(tx*2)+ir];
jpos = pos[(tx*2)+(ir+1)];
//printf("parent id%d\t child id %d\t iposition and jpos to swap %d %d \n",tidx,tx,ipos,jpos);
int dcost=0,ecost=0,fcost=0;
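// Incremental (delta) evaluation: rather than recomputing the full objective
// after swapping locations ipos and jpos, only the change is computed -- the
// swapped pair's own terms (dcost + ecost) plus the interaction terms with
// every other location k (calculate() above) -- so tcost = d_result[tidx] + delta.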
dcost = (d_dist[jpos*nsize+ipos] - d_dist[ipos*nsize+jpos])*(d_flows[(d_sol[(tidx * nsize)+ipos]-1) * nsize + (d_sol[(tidx * nsize)+jpos] - 1)] - d_flows[(d_sol[(tidx * nsize)+jpos]-1) * nsize + (d_sol[(tidx * nsize)+ipos] - 1)]);
ecost = (d_dist[jpos*nsize+jpos] - d_dist[ipos*nsize+ipos])*(d_flows[(d_sol[(tidx * nsize)+ipos]-1) * nsize + (d_sol[(tidx * nsize)+ipos] - 1)] - d_flows[(d_sol[(tidx * nsize)+jpos]-1) * nsize + (d_sol[(tidx * nsize)+jpos] - 1)]);
fcost = dcost + ecost;
int totcost=0,delta=0,tcost=0;
totcost = calculate(d_flows,d_dist,d_sol,nsize,tidx,ipos,jpos);
//__syncthreads();
//tcost=calculating(d_flows,d_dist,d_newarray,nsize,tidx);
delta = fcost + totcost;
tcost = d_result[tidx] + delta;
d_divresult[tidx*(nsize*(nsize-1)/2)+(tx*xj+j)]=tcost;
if(tcost<d_bestcostsofar[tidx])
{
d_bestcostsofar[tidx]=tcost;
swap(&d_tmpsol[ipos],&d_tmpsol[jpos]);
for(int j=0;j<nsize;j++)
{
d_bestsofar[tidx * nsize + j] = d_tmpsol[j];
//d_newarray[tidx*nsize+j] = d_tmpsol[j];
}
}
ir = ir +2;
}//end of j
}//end of k
//least(d_newarray,nsize,row,tidx,d_divresult,d_pos);
// d_tmpsol is a fixed-size local array, so no free() is needed
}
__device__ void diversification(int *d_sol,int *d_newarray,int nsize,int tidx,int l)
{
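// Diversification (perturbation): rebuilds the permutation by reading
// d_newarray back with stride `offset`, scattering previously adjacent
// facilities apart so the search can escape the current local optimum.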
/*if(l>nsize/2)
{
l = l/4*10;
}*/
int offset=9;
int pos,istart;
//for(int i=0;i<row;i++)
//{
pos=0;
istart =0;
for(int start=offset;start>=0;start--)
{
istart = start;
while(istart<nsize)
{
d_sol[tidx*nsize+pos] = d_newarray[tidx*nsize+istart];
pos=pos+1;
if(istart!=0)
istart = istart + offset;
else
break;
}
}
//}
}
__global__ void max(int *d_dist,int *d_flows,int *d_sol,int nsize,int row,int *d_result,int *d_bestsofar,int *d_bestcostsofar,int *d_pos,int *d_newarray,int *d_divresult,int *d_frequency)
{ int totalcost =0,divcost=0;//lrow=0;
int tidx = threadIdx.x+blockDim.x * blockIdx.x;
if(tidx < row)
{
totalcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = totalcost;
d_bestcostsofar[tidx]=d_result[tidx];
copy(d_sol,d_bestsofar,nsize,tidx);
copy(d_sol,d_newarray,nsize,tidx);
}
__syncthreads();
/*for(int l=0;l<nsize;l++)
{
if(tidx<row)
{
int threadsPerBlock= (nsize * (nsize - 1))/nsize;
//printf("child kernel number of threads %d:",threadsPerBlock);
child_kernel<<<1,threadsPerBlock>>>(tidx,nsize,d_dist,d_flows,d_sol,d_bestcostsofar,d_bestsofar,d_result,row,d_pos,d_newarray,d_divresult,d_frequency);
if (cudaSuccess != cudaGetLastError())
{
return;
}
//cudaDeviceSynchronize();
//cudaError_t err = cudaGetLastError();
//if (err != cudaSuccess) printf("!");
// wait for child to complete
if (cudaSuccess != cudaDeviceSynchronize()) {
return;
}
if(l<nsize/2)
{
for(int jc=0;jc<nsize;jc++)
{
d_sol[tidx*nsize+jc]= d_bestsofar[tidx*nsize+jc];
}
d_result[tidx] = d_bestcostsofar[tidx];
}
else
{
diversification(d_sol,d_bestsofar,nsize,tidx,l);
divcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = divcost;
}
}
}*/
/*__syncthreads();
if(tidx<row)
{
diversification(d_newarray,d_bestsofar,nsize,tidx);
divcost=calculating(d_flows,d_dist,d_newarray,nsize,tidx);
d_result[tidx] = divcost;
}
__syncthreads();
*/
for(int l=0;l<nsize*10;l++)
{
if(tidx<row)
{
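// Dynamic parallelism: each parent thread launches its own child grid of
// nsize-1 threads to evaluate all pairwise swaps of solution `tidx`
// concurrently, then waits on cudaDeviceSynchronize() before using the result.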
int threadsPerBlock= (nsize * (nsize - 1))/nsize;
//printf("child kernel number of threads %d:",threadsPerBlock);
child_kernel<<<1,threadsPerBlock>>>(tidx,nsize,d_dist,d_flows,d_sol,d_bestcostsofar,d_bestsofar,d_result,row,d_pos,d_newarray,d_divresult,d_frequency);
if (cudaSuccess != cudaGetLastError())
{
return;
}
//cudaDeviceSynchronize();
//cudaError_t err = cudaGetLastError();
//if (err != cudaSuccess) printf("!");
// wait for child to complete
if (cudaSuccess != cudaDeviceSynchronize()) {
return;
}
if(l%5==0)
//if(l<(nsize/2+(nsize/4)))
{
if(tidx>row/2)
{
for(int jc=0;jc<nsize;jc++)
{
d_sol[tidx*nsize+jc]= d_bestsofar[tidx*nsize+jc];
}
d_result[tidx] = d_bestcostsofar[tidx];
}
else
{
diversification(d_sol,d_newarray,nsize,tidx,l);
divcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = divcost;
}
}
else
{
copy(d_sol,d_newarray,nsize,tidx);
least(d_newarray,nsize,row,tidx,d_divresult,d_pos);
/*if(l%2==0)
{
diversification(d_sol,d_newarray,nsize,tidx,l);
divcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = divcost;
}*/
//else
//{
for(int jc=0;jc<nsize;jc++)
{
d_sol[tidx*nsize+jc]= d_newarray[tidx*nsize+jc];
}
divcost=calculating(d_flows,d_dist,d_sol,nsize,tidx);
d_result[tidx] = divcost;
//}
}
}
}
/*
temp = d_bestcostsofar[0];
//for(int i=1;i<row;i++)
if(tidx<row)
{
if(d_bestcostsofar[tidx]<temp)
{
temp = d_bestcostsofar[tidx];
lrow=tidx;
}
__syncthreads();
}
for(int j=0;j<nsize;j++)
{
printf("%d \t",d_bestsofar[lrow*nsize+j]);
}
printf("\n");
printf("best cost %d:",temp);
//printf("\n");
*/
}
int main(int argc,char *argv[])
{
int arraySizeX,arraySizeY,size,num,seed,a=0,b=0;
clock_t cpu_startTime, cpu_endTime;
double cpu_ElapseTime=0;
cpu_startTime = clock();
ifstream input;
cudaEvent_t start , stop;
float ctime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cout<<"input file name:"<<argv[1]<<endl;
int iseed = atoi(argv[2]);
input.open(argv[1],ios::in);
//input.open("tai25a.txt",ios::in);
if(!input.is_open())
{
cout<<"error opening file";
}
//reading the size,seed from the file
input>>size>>seed;
cout<<"array size:"<<size<<endl;
cout<<"seed value:"<<argv[2]<<endl;
//copying the size into nsize,seed into iseed variable
int nsize;
nsize=size;
arraySizeX=2*nsize;
arraySizeY=nsize;
//declaring array to copy the matrix from file into array
int** array;
array = (int**) malloc(arraySizeX*sizeof(int*));
for (int i = 0; i < arraySizeX; i++)
array[i] = (int*) malloc(arraySizeY*sizeof(int));
for(int row=0;row<(arraySizeX);row++)
{
for(int col=0;col<arraySizeY;col++){
array[row][col]=0;
}//end col for
}//end row for
//flatten array dist nd flows declarations
int size_A = nsize * nsize;
int mem_size_A = sizeof(int) * size_A;
int *h_dist = (int *)malloc(mem_size_A);
int *h_flows = (int *)malloc(mem_size_A);
int *h_a = (int *)malloc(mem_size_A);
for(int i=0;i<nsize;i++)
{
for(int j=0;j<nsize;j++)
{
h_dist[i *nsize +j] = 0;
h_flows[i* nsize +j] = 0;
}
}
while(!input.eof())
{
input>>num;
if(b==nsize)
{
a++;
b=0;
}
//a->row,b->col
if(a!=(nsize*2) && b!=nsize)
{
array[a][b]=num;
b++;
}//end if
}// end-while
input.close();
for(int row=0;row<nsize;row++)
{
for(int col=0;col<nsize;col++)
{
h_flows[row *nsize + col]=array[row][col];
}
}
//storing in dist_dup array
int irow=0;
for(int row=nsize;row<nsize*2;row++)
{
int icol=0;
for(int col=0;col<nsize;col++)
{
h_dist[irow *nsize + icol]=array[row][col];
icol++;
}
irow++;
}
cout<<"flatten distance:";
for(int i=0;i<nsize;i++)
{
for(int j=0;j<nsize;j++)
{
cout<<h_dist[i * nsize+ j]<<" ";
}
cout<<endl;
}
cout<<endl;
cout<<"flatten flows:";
for(int i=0;i<nsize;i++)
{
for(int j=0;j<nsize;j++)
{
cout<<h_flows[i * nsize+j]<<" ";
}
cout<<endl;
}
cout<<endl;
cout<<"size of the array:"<<nsize;
cout<<endl;
//srand(time(NULL));
srand(iseed);
int *init_sol,j,row=6144;
init_sol = (int *)malloc(nsize * sizeof(int));
for(int i=0;i<nsize;i++)
init_sol[i] = i+1;
int size_B = (row) * nsize;
int mem_size_B = sizeof(int) * size_B;
int *h_sol = (int *)malloc(mem_size_B);
for(int k=0;k<row;k++)
{
for(int i=nsize-1;i>0;i--)
{
j= rand() % (i+1);
int temp = init_sol[i];
init_sol[i] = init_sol[j];
init_sol[j]=temp;
}
for(int l=0;l<nsize;l++)
{
h_sol[k *nsize + l] = init_sol[l];
}
}
/*cout<<"initial solution array cpu:";
for(int i=0;i<row;i++)
{
for(int j=0;j<nsize;j++)
cout<<h_sol[i *nsize + j]<<" ";
cout<<endl;
}
*/
int size_B1 = (nsize*(nsize-1))/2 * 2;
int mem_size_B1 = sizeof(int) * size_B1;
int *h_pos = (int *)malloc(mem_size_B1);
int l=0;
for(int i=0;i<nsize-1;i++)
{
int q =l;
for(int j=i+1;j<nsize;j++)
{
int ipos=i;
int jpos=j;
h_pos[q*2+0] = ipos;
h_pos[q*2+1] = jpos;
q++;
}
l=q;
}
/*cout<<"swapping locations of the array one:";
for(int i=0;i<(nsize*(nsize-1))/2;i++)
{
for(int j=0;j<2;j++)
{
cout<<h_pos[i*2+j];
}
cout<<endl;
}
cout<<endl;
int xj = ((nsize*(nsize-1))/2)/(nsize-1);
int x=0;
cout<<"swapping locations of the array two:";
for(int i=0;i<(nsize-1);i++)
{int k=x;
for(int j=0;j<xj;j++)
{
cout<<h_pos[(i*2)+k];
cout<<h_pos[(i*2)+(k+1)];
cout<<endl;
k = k+2;
}
cout<<endl;
x=k-2;
cout<<"x value:"<<x;
cout<<endl;
}
*/
int *h_newarray;
h_newarray = (int *)malloc(row * nsize * sizeof(int));
/*cout<<"diversified array:";
for(int i=0;i<(row);i++)
{
for(j=0;j<nsize;j++)
{
cout<<h_newarray[i*nsize+j]<<" ";
}
cout<<endl;
}
*/
int *h_result,*h_frequency;
int *d_result,*d_frequency;
int *h_divresult;
int *d_divresult;
int *h_bestsofar,*h_bestcostsofar;
h_result = (int *)malloc(row * sizeof(int));
h_frequency = (int *)malloc(row * sizeof(int));
h_divresult = (int *)malloc((row*(nsize*(nsize-1))/2) * sizeof(int));
h_bestsofar = (int *)malloc(row * nsize * sizeof(int));
h_bestcostsofar = (int *)malloc((row) * sizeof(int));
for(int i=0;i<row;i++)
h_frequency[i] = (nsize * (nsize-1))/2;
cudaEventRecord(start,0);
int *d_bestsofar=NULL,*d_bestcostsofar=NULL,*d_newarray=NULL;
gpuErrchk( cudaMalloc((void **)&d_bestcostsofar,row * sizeof(int)) );
gpuErrchk( cudaMalloc((void **)&d_bestsofar,row * nsize * sizeof(int)) );
gpuErrchk( cudaMalloc((void **)&d_result,row * sizeof(int)) );
gpuErrchk( cudaMalloc((void **)&d_frequency,row * sizeof(int)) );
gpuErrchk( cudaMalloc((void **)&d_newarray,(row) * nsize * sizeof(int)));
gpuErrchk( cudaMalloc((void **)&d_divresult,(row*(nsize*(nsize-1))/2) * sizeof(int)) );
// declaring device array and allocating memory on gpu
int *d_dist = NULL,*d_flows = NULL ,*d_sol = NULL,*d_pos=NULL;
gpuErrchk( cudaMalloc((void **)&d_dist,nsize*nsize*sizeof(int)) );
gpuErrchk( cudaMalloc((void **)&d_flows,nsize*nsize*sizeof(int)) );
gpuErrchk( cudaMalloc((void **)&d_pos,(nsize*(nsize-1))/2 * 2 * sizeof(int)));
gpuErrchk( cudaMalloc((void **)&d_sol,row*nsize*sizeof(int)) );
//copying arrays from host to device
gpuErrchk( cudaMemcpy(d_dist,h_dist,nsize*nsize*sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_flows,h_flows,nsize*nsize*sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_sol,h_sol,row*nsize*sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_pos,h_pos,(nsize*(nsize-1))/2 * 2 * sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_bestcostsofar,h_bestcostsofar,row * sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_bestsofar,h_bestsofar,row * nsize* sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk( cudaMemcpy(d_newarray,h_newarray, row* nsize* sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk( cudaMemcpy(d_result,h_result,row * sizeof(int), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_frequency,h_frequency,row * sizeof(int), cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_divresult,h_divresult,(row*(nsize*(nsize-1))/2) * sizeof(int), cudaMemcpyHostToDevice) );
//cuda kernel call
int threadsPerBlock=256;
int blockPerGrid = (row + threadsPerBlock - 1) / threadsPerBlock;
//int smSize= threadsPerBlock *nsize*nsize*sizeof(int);
cout<<"number of initial solutions:"<<row<<endl;
cout<<"number of blocks:"<<blockPerGrid<<" "<<endl;
cout<<"number of threads:"<<threadsPerBlock<<" "<<endl;
max<<<blockPerGrid,threadsPerBlock>>>(d_dist,d_flows,d_sol,nsize,row,d_result,d_bestsofar,d_bestcostsofar,d_pos,d_newarray,d_divresult,d_frequency);
gpuErrchk( cudaPeekAtLastError() );
if (cudaSuccess != cudaGetLastError()) {
return 1;
}
// wait for parent to complete
if (cudaSuccess != cudaDeviceSynchronize()) {
return 2;
}
gpuErrchk( cudaMemcpy(h_result,d_result,row * sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(h_divresult,d_divresult,(row*(nsize*(nsize-1))/2) * sizeof(int),cudaMemcpyDeviceToHost) );
//gpuErrchk( cudaMemcpy(h_newarray, d_newarray , (row) * nsize* sizeof(int),cudaMemcpyDeviceToHost ));
gpuErrchk( cudaMemcpy(h_bestcostsofar,d_bestcostsofar,row * sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(h_bestsofar, d_bestsofar , row * nsize* sizeof(int),cudaMemcpyDeviceToHost ));
//gpuErrchk( cudaMemcpy(temp_sol, d_tmpsol , (row*(nsize-1)) * nsize* sizeof(int),cudaMemcpyDeviceToHost ));
gpuErrchk( cudaMemcpy(h_sol,d_sol , (row) * nsize* sizeof(int),cudaMemcpyDeviceToHost ));
cudaEventRecord(stop,0);
cudaEventSynchronize(start);
cudaEventSynchronize(stop);
/*
cout<<"last sols and their cost:";
for(int i=0;i<row;i++)
{
for(int j=0;j<nsize;j++)
cout<<h_sol[i *nsize + j]<<" ";
cout<<h_result[i]<<" ";
cout<<endl;
}
cout<<"cost of best solutions sofar and best solutin array's:"<<endl;
for(int i=0;i<row;i++)
{
for(j=0;j<nsize;j++)
{
cout<<h_bestsofar[i*nsize+j]<<" ";
}
cout<<h_bestcostsofar[i]<<" ";
cout<<endl;
}
cout<<"cost of pair wise swaps"<<endl;
for(int i=0;i<(row*(nsize*(nsize-1))/2);i++)
cout<<h_divresult[i]<<" ";
cout<<endl;
*/
/*
int offset=5,pos,istart;
for(int i=0;i<row;i++)
{
pos=0;
istart =0;
for(int start=offset;start>=0;start--)
{
istart = start;
while(istart<nsize)
{
h_newarray[i*nsize+pos] = h_bestsofar[i*nsize+istart];
pos=pos+1;
if(istart!=0)
istart = istart + offset;
else
break;
}
}
}
cout<<"cpu newarray sol:";
cout<<endl;
for(int i=0;i<(row);i++)
{
for(j=0;j<nsize;j++)
{
cout<<h_newarray[i*nsize+j]<<" ";
}
cout<<endl;
}
cout<<"cost of gpu diversified initial sols:";
for(int i=0;i<row;i++)
{
for(j=0;j<nsize;j++)
{
cout<<h_newarray[i*nsize+j]<<" ";
}
cout<<h_result[i]<<" ";
cout<<endl;
}
cout<<endl;
*/
cout<<"best solutionsofar array:"<<endl;
int temp=0,lrow=0;
temp = h_bestcostsofar[0];
for(int i=1;i<row;i++)
{
if(h_bestcostsofar[i]<temp)
{
temp = h_bestcostsofar[i];
lrow=i;
}
}
for(int j=0;j<nsize;j++)
{
cout<<h_bestsofar[lrow*nsize+j]<<" ";
}
cout<<endl;
cout<<"best cost:"<<temp<<endl;
//*/
cpu_endTime = clock();
cpu_ElapseTime= ((cpu_endTime - cpu_startTime) /(double) CLOCKS_PER_SEC);
cout<<"total execution time in seconds:"<<cpu_ElapseTime<<endl;
cudaEventElapsedTime(&ctime, start , stop);
cout<<"time for the kernel in milliseconds:"<<ctime<<endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cout<<endl;
free(array);
free(h_dist);
free(h_flows);
free(init_sol);
free(h_sol);
free(h_pos);
free(h_result);
free(h_bestsofar);
free(h_bestcostsofar);
cudaFree(d_dist);
cudaFree(d_flows);
cudaFree(d_sol);
cudaFree(d_bestcostsofar);
cudaFree(d_result);
cudaFree(d_bestsofar);
cudaFree(d_pos);
return 0;
}
|
ac6ad47ae9a3080ab3496528e04f0e8d7b7eae9c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S2_4.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
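// sv is stored pitched: NEQ rows (one per state variable) by num_volumes
// columns (one per cell), so state i of cell c lives at
// ((real*)((char*)sv + pitch*i))[c]; pitch is mirrored into a device symbol below.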
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, mapping_device, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
check_cuda_error(hipFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(hipMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(hipMemcpy(mapping_device, mapping, extra_data_bytes_size, hipMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(hipFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5574211764260,0.00129305755715058,0.779441422719268,0.779241742711666,0.000175039240857358,0.484977289081740,0.00294257507368012,0.999998344595344,1.93700269716616e-08,1.89380174481509e-05,0.999773792418493,1.00755963480393,0.999999137126184,3.41466316398601e-05,1.23162815450729,9.71224673801957,139.552422843336};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
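// (Rush-Larsen scheme: each gate obeys dy/dt = (y_inf - y)/tau, whose exact
// solution over a step dt is y_inf - (y_inf - y)*exp(-dt/tau).)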
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK
real GpK=0.0146;
real parameters []={14.0344988699429,0.000243427554127383,0.000161272832250911,0.000484228011827550,0.275092424538870,0.175484829191378,0.164879494363494,3.77803127027096,0.0197412874581791,1.93055058781161,1099.31582404877,0.000553709594039336,0.144015543772373,0.0199814298252655,0.00826445055600327,9.00070147931675e-06};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
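// The hard-coded parameters[] array above overrides the default epicardium
// conductances and rates declared earlier, and arel/crel/Vleak replace the
// literal constants used in the myocardium RHS. The values appear to be a
// fitted set for this model variant (an assumption; the source gives no
// provenance for them).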
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
| ac6ad47ae9a3080ab3496528e04f0e8d7b7eae9c.cu | #include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "mixed_tentusscher_myo_epi_2004_S2_4.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium GPU model\n\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
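// sv is a pitched 2D array: state variable i of cell j lives at
// ((real*)((char*)sv + pitch*i))[j], which is how every kernel below
// indexes it; pitch_h is the row pitch returned by cudaMallocPitch and is
// mirrored into the device-side 'pitch' symbol here.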
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, mapping_device, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
check_cuda_error(cudaFree(mapping_device));
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu)
{
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
// the cells_to_solve array is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL)
{
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
// Get the mapping array
uint32_t *mapping = NULL;
uint32_t *mapping_device = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
check_cuda_error(cudaMalloc((void **)&mapping_device, extra_data_bytes_size));
check_cuda_error(cudaMemcpy(mapping_device, mapping, extra_data_bytes_size, cudaMemcpyHostToDevice));
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, mapping_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
if(mapping_device) check_cuda_error(cudaFree(mapping_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, uint32_t *mapping, int num_volumes)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < num_volumes)
{
// Initial conditions for TenTusscher 2004 myocardium
if (mapping[threadID] == 0)
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
// Initial conditions for TenTusscher 2004 epicardium
else
{
// Default initial conditions
/*
*((real * )((char *) sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) = 0.f; //M
*((real * )((char *) sv + pitch * 2) + threadID) = 0.75; //H
*((real * )((char *) sv + pitch * 3) + threadID) = 0.75f; //J
*((real * )((char *) sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = 0.f; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) = 1.f; //S
*((real * )((char *) sv + pitch * 8) + threadID) = 0.f; //R
*((real * )((char *) sv + pitch * 9) + threadID) = 0.f; //D
*((real * )((char *) sv + pitch * 10) + threadID) = 1.f; //F
*((real * )((char *) sv + pitch * 11) + threadID) = 1.f; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) = 1.f; //G
*((real * )((char *) sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5574211764260,0.00129305755715058,0.779441422719268,0.779241742711666,0.000175039240857358,0.484977289081740,0.00294257507368012,0.999998344595344,1.93700269716616e-08,1.89380174481509e-05,0.999773792418493,1.00755963480393,0.999999137126184,3.41466316398601e-05,1.23162815450729,9.71224673801957,139.552422843336};
for (uint32_t i = 0; i < NEQ; i++)
*((real * )((char *) sv + pitch * i) + threadID) = sv_sst[i];
}
}
}
// Solving the model for each cell in the tissue matrix ni x nj
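// One thread integrates one cell: mapping[] selects the myocardium or the
// epicardium right-hand side per cell, and each of the num_steps sub-steps
// writes the updated state back through the same pitched sv layout.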
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t *mapping, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve)
{
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n)
{
if (mapping[sv_id] == 0)
{
RHS_gpu_myo(sv, rDY, stim_currents[threadID], sv_id, dt);
for(int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
else
{
RHS_gpu_epi(sv, rDY, stim_currents[threadID], sv_id, dt);
for (int i = 0; i < NEQ; i++)
{
*((real *) ((char *) sv + pitch * i) + sv_id) = dt * rDY[i] + *((real *) ((char *) sv + pitch * i) + sv_id);
}
}
}
}
}
inline __device__ void RHS_gpu_myo (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
inline __device__ void RHS_gpu_epi (real *sv_, real *rDY_, real stim_current, int threadID_, real dt)
{
// State variables
real svolt = *((real*)((char*)sv_ + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv_ + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv_ + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv_ + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv_ + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv_ + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv_ + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv_ + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv_ + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv_ + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv_ + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv_ + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv_ + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv_ + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv_ + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv_ + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv_ + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK
real GpK=0.0146;
real parameters []={14.0344988699429,0.000243427554127383,0.000161272832250911,0.000484228011827550,0.275092424538870,0.175484829191378,0.164879494363494,3.77803127027096,0.0197412874581791,1.93055058781161,1099.31582404877,0.000553709594039336,0.144015543772373,0.0199814298252655,0.00826445055600327,9.00070147931675e-06};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
6dea8c7cd9286d2a8153befeb40c25b3f8f0e365.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgesellcmmv.cu normal z -> s, Wed Sep 17 15:08:43 2014
*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#include "sm_32_intrinsics.h"
#define PRECISION_s
//#define TEXTURE
/*
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4_ldg( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
const float* __restrict__ d_x,
float beta,
float *d_y)
{
#if defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = __ldg( d_x+ i1 );
x2 = __ldg( d_x+ i2 );
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = __ldg( d_x + d_colind[ block*kk] );
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
#endif
}
*/
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
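// Layout sketch (as used by the kernels below): rows are grouped into
// slices of 'blocksize' rows; slice s stores its nonzeros column-major in
// d_val/d_colind starting at offset d_rowptr[s], zero-padded so that all
// rows of a slice have equal length, hence element n of local row r sits
// at offset + blocksize*n + r.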
__global__ void
zgesellptmv2d_kernel_1( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x ;
int offset = d_rowptr[ blockIdx.x ];
int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = d_colind [offset+ blocksize * n + threadIdx.x ];
float val = d_val[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*d_x[col];
}
}
d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = d_x[ i1 ];
x2 = d_x[ i2 ];
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = d_x[ d_colind[ block*kk] ];
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_8( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = d_x[ i1 ];
x2 = d_x[ i2 ];
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = d_x[ d_colind[ block*kk] ];
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_16( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d_x[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_32( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d_x[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
__inline__ __device__ float
read_from_tex( hipTextureObject_t texdx, const int& i){
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2float(temp.y,temp.x);
}
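// Note: the PRECISION_d guard above excludes this whole texture path from
// the present PRECISION_s build; the int2 fetch/reassembly idiom targets
// 8-byte doubles and survives here only as generated dead code.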
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4_tex( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = read_from_tex( texdx, d_colind[ block*kk] );
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_8_tex( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = read_from_tex( texdx, d_colind[ block*kk] );
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_16_tex( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_32_tex( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
hipTextureObject_t texdx,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU; the transA
argument is accepted but currently ignored.
Input format is SELLP.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
blocksize magma_int_t
number of rows in one ELL-slice
@param
slices magma_int_t
number of slices in matrix
@param
alignment magma_int_t
number of threads assigned to one row
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in SELLP
@param
d_colind magma_index_t*
column indices of A in SELLP
@param
d_rowptr magma_index_t*
row pointer of A in SELLP
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgesellpmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float *d_x,
float beta,
float *d_y ){
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( blocksize, alignment, 1);
int dimgrid1 = (int) sqrt( (double) slices );
int dimgrid2 = (slices + dimgrid1 - 1) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
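// The slices are spread over a 2D grid (dimgrid1 x dimgrid2 >= slices)
// because a single grid dimension is capped at 65535 blocks on pre-Kepler
// hardware; the kernels recover the slice index as
// blockIdx.y * gridDim.x + blockIdx.x.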
int Ms = num_threads * sizeof( float );
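// Ms bytes of dynamic shared memory: one partial dot product per thread
// (blocksize*alignment floats), reduced across the 'alignment' dimension
// inside each 2D kernel.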
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
hipChannelFormatDesc channel_desc;
channel_desc =
hipCreateChannelDesc(32, 32, 0, 0, hipChannelFormatKindSigned);
// Create resource descriptor.
struct hipResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = hipResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)d_x;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(float);
// Specify texture object parameters.
struct hipTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeClamp;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
// Create texture object.
hipTextureObject_t texdx = 0;
hipCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
if( alignment == 4)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4_tex), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
else if( alignment == 8)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8_tex), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
else if( alignment == 16)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16_tex), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
else if( alignment == 32)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32_tex), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
hipDestroyTextureObject(texdx);
#else
if( alignment == 1)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_1), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else if( alignment == 4)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_4), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else if( alignment == 8)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_8), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else if( alignment == 16)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_16), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else if( alignment == 32)
hipLaunchKernelGGL(( zgesellptmv2d_kernel_32), dim3(grid), dim3(block), Ms, magma_stream ,
m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
#endif
return MAGMA_SUCCESS;
}
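/* Illustrative call (hypothetical sizes; the SELLP arrays are assumed to be
   resident on the device already):

       magma_int_t blocksize = 8, alignment = 8;
       magma_int_t slices = (m + blocksize - 1) / blocksize;
       magma_sgesellpmv( MagmaNoTrans, m, n, blocksize, slices, alignment,
                         alpha, d_val, d_colind, d_rowptr, d_x, beta, d_y );

   computes d_y = alpha*A*d_x + beta*d_y with 8 threads cooperating on each
   row. */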
| 6dea8c7cd9286d2a8153befeb40c25b3f8f0e365.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zgesellcmmv.cu normal z -> s, Wed Sep 17 15:08:43 2014
*/
#include "cuda_runtime.h"
#include <stdio.h>
#include "common_magma.h"
#include "sm_32_intrinsics.h"
#define PRECISION_s
//#define TEXTURE
/*
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4_ldg( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
const float* __restrict__ d_x,
float beta,
float *d_y)
{
#if defined(TEXTURE) && (__CUDA_ARCH__ >= 300)
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = __ldg( d_x+ i1 );
x2 = __ldg( d_x+ i2 );
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = __ldg( d_x + d_colind[ block*kk] );
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
#endif
}
*/
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning one thread to each row - 1D kernel
__global__ void
zgesellptmv2d_kernel_1( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// threads assigned to rows
int Idx = blockDim.x * blockIdx.x + threadIdx.x ;
int offset = d_rowptr[ blockIdx.x ];
int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize;
if(Idx < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n++){
int col = d_colind [offset+ blocksize * n + threadIdx.x ];
float val = d_val[offset+ blocksize * n + threadIdx.x];
if( val != 0){
dot=dot+val*d_x[col];
}
}
d_y[ Idx ] = dot * alpha + beta * d_y [ Idx ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = d_x[ i1 ];
x2 = d_x[ i2 ];
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = d_x[ d_colind[ block*kk] ];
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_8( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = d_x[ i1 ];
x2 = d_x[ i2 ];
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = d_x[ d_colind[ block*kk] ];
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_16( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d_x[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_32( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float* d_x,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d_x[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
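// Note (illustration only): the unrolled tails above implement the standard
// shared-memory tree reduction over the T partial sums of one row. For a
// generic power-of-two T the same pattern reads:
//
//     for (int s = T/2; s >= 1; s >>= 1) {
//         if (idx < s) shared[ldx] += shared[ldx + blocksize*s];
//         __syncthreads();
//     }
//     if (idx == 0) d_y[row] = shared[ldx]*alpha + beta*d_y[row];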
/************************* same but using texture mem *************************/
#if defined(PRECISION_d) && defined(TEXTURE)
// Reassemble a value fetched from the texture as two 32-bit words. In the
// double-precision original this intrinsic is __hiloint2double; the renamed
// call below is an artifact of MAGMA's precision-generation script, and the
// whole block is compiled out here unless PRECISION_d is defined.
__inline__ __device__ float
read_from_tex( cudaTextureObject_t texdx, const int& i){
int2 temp = tex1Dfetch<int2>( texdx, i );
return __hiloint2float(temp.y,temp.x);
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_4_tex( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = read_from_tex( texdx, d_colind[ block*kk] );
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 2 ){
shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_8_tex( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
int kk, i1, i2;
float x1, x2, v1, v2;
d_colind += offset + ldx ;
d_val += offset + ldx;
for ( kk = 0; kk < max_-1 ; kk+=2 ){
i1 = d_colind[ block*kk];
i2 = d_colind[ block*kk + block];
x1 = read_from_tex( texdx, i1 );
x2 = read_from_tex( texdx, i2 );
v1 = d_val[ block*kk ];
v2 = d_val[ block*kk + block];
dot += v1 * x1;
dot += v2 * x2;
}
if (kk<max_){
x1 = read_from_tex( texdx, d_colind[ block*kk] );
v1 = d_val[ block*kk ];
dot += v1 * x1;
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_16_tex( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
zgesellptmv2d_kernel_32_tex( int num_rows,
int num_cols,
int blocksize,
int T,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
cudaTextureObject_t texdx,
float beta,
float *d_y)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ float shared[];
if(row < num_rows ){
float dot = MAGMA_S_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
float val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * read_from_tex( texdx, col );
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
d_y[row] =
(shared[ldx]+shared[ldx+blocksize*1])*alpha + beta*d_y [row];
}
}
}
}
#endif
/********************* end of texture versions **************************/
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is SELLP.
Arguments
---------
@param
transA magma_trans_t
transposition parameter for A
@param
m magma_int_t
number of rows in A
@param
n magma_int_t
number of columns in A
@param
blocksize magma_int_t
number of rows in one ELL-slice
@param
slices magma_int_t
number of slices in matrix
@param
alignment magma_int_t
number of threads assigned to one row
@param
alpha float
scalar multiplier
@param
d_val float*
array containing values of A in SELLP
@param
d_colind magma_index_t*
column indices of A in SELLP
@param
d_rowptr magma_index_t*
row pointer of A in SELLP
@param
d_x float*
input vector x
@param
beta float
scalar multiplier
@param
d_y float*
input/output vector y
@ingroup magmasparse_sblas
********************************************************************/
extern "C" magma_int_t
magma_sgesellpmv( magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t blocksize,
magma_int_t slices,
magma_int_t alignment,
float alpha,
float *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
float *d_x,
float beta,
float *d_y ){
// using a 2D thread grid
int num_threads = blocksize*alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( blocksize, alignment, 1);
int dimgrid1 = sqrt(slices);
int dimgrid2 = (slices + dimgrid1 -1 ) / dimgrid1;
dim3 grid( dimgrid1, dimgrid2, 1);
int Ms = num_threads * sizeof( float );
#if defined(PRECISION_d) && defined(TEXTURE)
// Create channel.
cudaChannelFormatDesc channel_desc;
channel_desc =
cudaCreateChannelDesc(32, 32, 0, 0, cudaChannelFormatKindSigned);
// Create resource descriptor.
struct cudaResourceDesc resDescdx;
memset(&resDescdx, 0, sizeof(resDescdx));
resDescdx.resType = cudaResourceTypeLinear;
resDescdx.res.linear.devPtr = (void*)d_x;
resDescdx.res.linear.desc = channel_desc;
resDescdx.res.linear.sizeInBytes = m*sizeof(float);
// Specify texture object parameters.
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeClamp;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
// Create texture object.
cudaTextureObject_t texdx = 0;
cudaCreateTextureObject(&texdx, &resDescdx, &texDesc, NULL);
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
if( alignment == 4)
zgesellptmv2d_kernel_4_tex<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
else if( alignment == 8)
zgesellptmv2d_kernel_8_tex<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
else if( alignment == 16)
zgesellptmv2d_kernel_16_tex<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
else if( alignment == 32)
zgesellptmv2d_kernel_32_tex<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, texdx, beta, d_y );
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
cudaDestroyTextureObject(texdx);
#else
if( alignment == 1)
zgesellptmv2d_kernel_1<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else if( alignment == 4)
zgesellptmv2d_kernel_4<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else if( alignment == 8)
zgesellptmv2d_kernel_8<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else if( alignment == 16)
zgesellptmv2d_kernel_16<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else if( alignment == 32)
zgesellptmv2d_kernel_32<<< grid, block, Ms, magma_stream >>>
( m, n, blocksize, alignment, alpha,
d_val, d_colind, d_rowptr, d_x, beta, d_y );
else{
printf("error: alignment %d not supported.\n", alignment);
exit(-1);
}
#endif
return MAGMA_SUCCESS;
}
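// Usage sketch (illustrative only; assumes d_val/d_colind/d_rowptr/d_x/d_y
// already hold a SELL-P matrix and dense vectors in device memory, packed
// with the same blocksize/alignment passed here):
//
//     magma_int_t m = ..., n = ...;
//     magma_int_t blocksize = 32, alignment = 8;
//     magma_int_t slices = (m + blocksize - 1) / blocksize;
//     magma_sgesellpmv( MagmaNoTrans, m, n, blocksize, slices, alignment,
//                       1.0f, d_val, d_colind, d_rowptr,
//                       d_x, 0.0f, d_y );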
|
834bbf739ab31d0a46769971da85352fb6bd4fb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
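/* Worked examples of the bit trick above:
   nextPow2(1) == 1, nextPow2(5) == 8, nextPow2(256) == 256,
   nextPow2(257) == 512. The OR cascade smears the highest set bit of x-1
   into every lower position, so ++x lands on a power of two. */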
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* Kahan (compensated) summation: the correction term c captures the rounding
   error of each addition and feeds it back into the next one, giving a far
   more accurate result than a naive sum */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
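/* Worked example of the compensation above (sketch): in float, a naive sum
   of 1e8 followed by one hundred 1.0s never moves, because 1e8f + 1.0f
   rounds back to 1e8f (the spacing between floats near 1e8 is 8). The
   correction term c captures each lost 1.0 and re-injects it on the next
   iteration, so reduce_cpu() returns approximately 1e8 + 100 (to within
   one ulp). */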
__global__ void
kernel2 (dtype *input, dtype *output, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < n) {
scratch[threadIdx.x] = input[i];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x/2; s > 0; s = s >> 1) {
if(threadIdx.x < s) {
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0) {
output[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata;        /* host input array */
dtype h_odata, h_cpu;  /* scalar GPU and CPU results */
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_2, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 2;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel2) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads - 1) / threads;
}
hipDeviceSynchronize ();
t_kernel_2 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute sequential index GPU reduction kernel: %Lg secs\n", t_kernel_2);
double bw = (N * sizeof(dtype)) / (t_kernel_2 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(fabs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
/* release host and device buffers */
free (h_idata);
CUDA_CHECK_ERROR (hipFree (d_idata));
CUDA_CHECK_ERROR (hipFree (d_odata));
return 0;
}
| 834bbf739ab31d0a46769971da85352fb6bd4fb3.cu | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the smallest power of 2 that is greater than or equal to x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
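/* Worked example: with the default N_ = 8*1024*1024 and whichKernel = 2,
   this yields threads = MAX_THREADS = 256 and
   blocks = 8388608 / 256 = 32768, i.e. one thread per input element. */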
/* Kahan (compensated) summation: the correction term c captures the rounding
   error of each addition and feeds it back into the next one, giving a far
   more accurate result than a naive sum */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel2 (dtype *input, dtype *output, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < n) {
scratch[threadIdx.x] = input[i];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = blockDim.x/2; s > 0; s = s >> 1) {
if(threadIdx.x < s) {
scratch[threadIdx.x] += scratch[threadIdx.x + s];
}
__syncthreads ();
}
if(threadIdx.x == 0) {
output[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata;        /* host input array */
dtype h_odata, h_cpu;  /* scalar GPU and CPU results */
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_2, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 2;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(16, ((blocks + 16 - 1) / 16), 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel2 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel2 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
dim3 gb(16, (blocks + 16 - 1) / 16, 1);
dim3 tb(threads, 1, 1);
kernel2 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads - 1) / threads;
}
cudaThreadSynchronize ();
t_kernel_2 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute sequential index GPU reduction kernel: %Lg secs\n", t_kernel_2);
double bw = (N * sizeof(dtype)) / (t_kernel_2 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(fabs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
/* release host and device buffers */
free (h_idata);
CUDA_CHECK_ERROR (cudaFree (d_idata));
CUDA_CHECK_ERROR (cudaFree (d_odata));
return 0;
}
|
c8ad442d07a03f8c12f7d71498e5c4f1c43159c6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===------------- objects.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the data objects used on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include "state-queue.h"
////////////////////////////////////////////////////////////////////////////////
// global data holding OpenMP state information
////////////////////////////////////////////////////////////////////////////////
__device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
// Pointer to this team's OpenMP state object
__device__ __shared__ omptarget_nvptx_ThreadPrivateContext
*omptarget_nvptx_threadPrivateContext;
////////////////////////////////////////////////////////////////////////////////
// The team master sets the outlined parallel function in this variable to
// communicate with the workers. Since it is in shared memory, there is one
// copy of these variables for each kernel, instance, and team.
////////////////////////////////////////////////////////////////////////////////
volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
////////////////////////////////////////////////////////////////////////////////
// OpenMP kernel execution parameters
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ uint32_t execution_param;
////////////////////////////////////////////////////////////////////////////////
// Data sharing state
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ DataSharingStateTy DataSharingState;
////////////////////////////////////////////////////////////////////////////////
// Scratchpad for teams reduction. FIXME: allocate it in the offload library.
////////////////////////////////////////////////////////////////////////////////
// FIXME
__device__ char scratchpad[262144];
__device__ unsigned timestamp = 0;
| c8ad442d07a03f8c12f7d71498e5c4f1c43159c6.cu | //===------------- objects.cu - NVPTX OpenMP GPU objects --------- CUDA -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the data objects used on the GPU device.
//
//===----------------------------------------------------------------------===//
#include "omptarget-nvptx.h"
#include "state-queue.h"
////////////////////////////////////////////////////////////////////////////////
// global data holding OpenMP state information
////////////////////////////////////////////////////////////////////////////////
__device__
omptarget_nvptx_Queue<omptarget_nvptx_ThreadPrivateContext, OMP_STATE_COUNT>
omptarget_nvptx_device_State[MAX_SM];
// Pointer to this team's OpenMP state object
__device__ __shared__ omptarget_nvptx_ThreadPrivateContext
*omptarget_nvptx_threadPrivateContext;
////////////////////////////////////////////////////////////////////////////////
// The team master sets the outlined parallel function in this variable to
// communicate with the workers. Since it is in shared memory, there is one
// copy of these variables for each kernel, instance, and team.
////////////////////////////////////////////////////////////////////////////////
volatile __device__ __shared__ omptarget_nvptx_WorkFn omptarget_nvptx_workFn;
////////////////////////////////////////////////////////////////////////////////
// OpenMP kernel execution parameters
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ uint32_t execution_param;
////////////////////////////////////////////////////////////////////////////////
// Data sharing state
////////////////////////////////////////////////////////////////////////////////
__device__ __shared__ DataSharingStateTy DataSharingState;
////////////////////////////////////////////////////////////////////////////////
// Scratchpad for teams reduction. FIXME: allocate it in the offload library.
////////////////////////////////////////////////////////////////////////////////
// FIXME
__device__ char scratchpad[262144];
__device__ unsigned timestamp = 0;
|
abc8a5aa4dd02673d511b31731525855eb1dcdb2.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <fftw3.h>
#include <hipfft.h>
#include <sys/time.h>
#include <assert.h>
#include <chrono>
#include "sector.h"
#include "floats.h"
#include "dimension.h"
#include <msgpack.hpp>
#include "zhelpers.hpp"
using namespace std;
#define NUM_BYTES_PER_SAMPLE (3*2*2)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) {
if (code != hipSuccess) {
fprintf( stderr, "GPUassert: %s %s %d\n", hipGetErrorString( code ), file, line );
if (abort) exit( code );
}
}
struct sectormsg {
int id;
vector<int>
i_hh, q_hh,
i_vv, q_vv,
i_vh, q_vh;
MSGPACK_DEFINE_MAP( id, i_hh, q_hh, i_vv, q_vv, i_vh, q_vh );
};
const int
n_sectors = 143,
n_sweeps = 1024,
n_samples = 512,
n_elevations = 9;
static const int k_range_resolution = 30;
static constexpr float k_calibration = 1941.05;
static const int ma_count = 7;
int
current_sector = 0,
current_sweep = 0,
current_sample = 0,
current_elevation = 0,
current_stream = 0;
int
hh_index_start,
vv_index_start,
vh_index_start,
input_stream_index_offset;
// host
cuFloatComplex *p_iq;
float *result;
float *hamming_coef;
cuFloatComplex *fft_ma;
// device
float *d_hamming;
cuFloatComplex *d_iq;
cuFloatComplex *d_tmp;
float *d_result;
// cufft
hipfftHandle
*fft_range_handle,
*fft_doppler_handle,
*fft_pdop_handle;
hipStream_t *streams;
zmq::context_t context( 1 );
zmq::socket_t subscriber( context, ZMQ_SUB );
zmq::socket_t publisher( context, ZMQ_PUB );
__constant__ cuFloatComplex d_ma[512];
__global__ void __apply_hamming(cuFloatComplex *a, float *b, int offset) {
const unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
a[offset + idx] = make_cuFloatComplex(
b[idx]*cuCrealf( a[offset + idx] ),
b[idx]*cuCimagf( a[offset + idx] ));
}
__global__ void __sum_v4(cuFloatComplex *in, cuFloatComplex *out, int offset) {
const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
sdata[j + n*d] = make_cuFloatComplex(
in[offset + j + i*n + n*d].x,
in[offset + j + i*n + n*d].y );
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
if (j < s) {
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
sdata[j + n*d] = cuCaddf( sdata[j + n*d], sdata[j + n*d + s] );
}
}
__syncthreads();
}
if (j == 0) {
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
out[i*n + n*d] = sdata[j + n*d];
}
}
}
__global__ void __avgconj(cuFloatComplex *inout, cuFloatComplex *sum, int offset) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
float avgx = sum[i*n].x/(float)n;
float avgy = sum[i*n].y/(float)n;
inout[offset + j + i*n] = make_cuFloatComplex( inout[offset + j + i*n].x - avgx,
(inout[offset + j + i*n].y - avgy)*-1 );
}
__global__ void __conjugate(cuFloatComplex *a, int offset) {
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[offset + idx].y *= -1;
}
__global__ void __shift(cuFloatComplex *inout, int n, int offset) {
const unsigned int i = blockIdx.x, j = threadIdx.x;
cuFloatComplex temp = inout[offset + j + i*n];
inout[offset + j + i*n] = inout[offset + (j + n/2) + i*n];
inout[offset + (j + n/2) + i*n] = temp;
}
__global__ void __clip_v2(cuFloatComplex *inout, int n, int offset) {
const unsigned int i = threadIdx.x, j = n - blockIdx.x - 1;
inout[offset + j + i*n] = make_cuFloatComplex( 0, 0 );
}
__global__ void __abssqr(cuFloatComplex *inout, int offset) {
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
float real, imag;
real = cuCrealf( inout[offset + idx] );
imag = cuCimagf( inout[offset + idx] );
inout[offset + idx] = make_cuFloatComplex( real*real + imag*imag, 0 );
}
__global__ void __apply_ma(cuFloatComplex *inout, int offset) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[offset + j + i*n] = cuCmulf( inout[offset + j + i*n], d_ma[j] );
}
__global__ void __scale_real(cuFloatComplex *inout, int offset) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[offset + j + i*n] = make_cuFloatComplex( inout[offset + j + i*n].x/n, 0 );
}
__global__ void __sum_inplace_v4(cuFloatComplex *in, int offset) {
const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
sdata[j + n*d] = make_cuFloatComplex( in[offset + j + i*n + n*d].x, in[offset + j + i*n + n*d].y );
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
if (j < s) {
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
sdata[j + n*d] = cuCaddf( sdata[j + n*d], sdata[j + n*d + s] );
}
}
__syncthreads();
}
if (j == 0) {
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
in[offset + (i*n + n*d)] = sdata[j + n*d];
}
}
}
__global__ void __calcresult_v2(
cuFloatComplex *iq,
float *out,
int n,
int offset_hh, int offset_vv, int offset_vh,
int result_offset) {
const unsigned int i = threadIdx.x;
float z = pow( i*k_range_resolution, 2.0 )*k_calibration*iq[offset_hh + i*n].x;
float zdb = 10*log10( z );
float zdr = 10*(log10( iq[offset_hh + i*n].x ) - log10( iq[offset_vv + i*n].x ));
out[result_offset + i*2 + 0] = zdb;
out[result_offset + i*2 + 1] = zdr;
}
void setup_ports() {
subscriber.connect( "tcp://localhost:5563" );
subscriber.setsockopt( ZMQ_SUBSCRIBE, "A", 1 );
publisher.bind( "tcp://*:5564" );
}
void generate_hamming_coefficients(int m, int n) {
cout << "Generating Hamming coefficients..." << endl;
// Calculate normalization power on range cell
float p_range = 0;
for (int i = 0; i < m; i++) {
p_range = p_range + pow( 0.53836 - 0.46164*cos( 2*M_PI*(i)/(m - 1)), 2.0 );
}
p_range = p_range/m;
// Calculate normalization power on Doppler cell
float p_doppler = 0;
for (int j = 0; j < n; j++) {
p_doppler = p_doppler + pow( 0.53836 - 0.46164*cos( 2*M_PI*(j)/(n - 1)), 2.0 );
}
p_doppler = p_doppler/n;
// Constant since FFT is not normalized and the power is computed w.r.t. 50ohm
const float k_wind = -1/(16383.5*m*n*sqrt( 50 ));
const float c = k_wind/sqrt( p_range*p_doppler );
// Generate elements
hamming_coef = new float[m*n];
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
hamming_coef[j + i*n] =
(0.53836 - 0.46164*cos( 2*M_PI*(i)/(m - 1)))*(0.53836 - 0.46164*cos( 2*M_PI*(j)/(n - 1)))*c;
}
}
}
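// Sanity-check sketch for the window above: at i = 0 the raw Hamming term is
// 0.53836 - 0.46164*cos(0) = 0.07672, and at the window centre i = (m-1)/2 it
// is 0.53836 + 0.46164 = 1.0, so the 2D coefficients span (0.07672^2)*c up to
// (1.0^2)*c before the power normalization that is folded into c.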
void generate_ma_coefficients(Dimension4 dim, int n) {
cout << "Generating MA coefficients..." << endl;
float *ma_coef = new float[n];
float _sum = 0.0;
for (int i = 0; i < n; i++) {
ma_coef[i] = exp( -(pow( i - ((n - 1)/2), 2.0 ))/2 );
_sum += ma_coef[i];
}
for (int i = 0; i < n; i++) {
ma_coef[i] = ma_coef[i]/_sum;
}
fftwf_complex *_fft_ma = (fftwf_complex *) fftwf_malloc( sizeof( fftwf_complex )*dim.width );
fftwf_plan fft_ma_plan = fftwf_plan_dft_1d( dim.width, _fft_ma, _fft_ma, FFTW_FORWARD, FFTW_ESTIMATE );
for (int j = 0; j < n; j++) {
_fft_ma[j][0] = ma_coef[j];
_fft_ma[j][1] = 0;
}
for (int j = n; j < dim.width; j++) {
_fft_ma[j][0] = 0;
_fft_ma[j][1] = 0;
}
fftwf_execute( fft_ma_plan );
fftwf_destroy_plan( fft_ma_plan );
fft_ma = new cuFloatComplex[dim.width];
for (int j = 0; j < dim.width; j++) {
fft_ma[j] = make_cuFloatComplex( _fft_ma[j][0], _fft_ma[j][1] );
}
fftwf_free( _fft_ma );
}
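// Note (sketch): fft_ma holds the zero-padded forward FFT of the Gaussian MA
// window, so the per-range smoothing applied later in perform_stage_2 is a
// circular convolution done in the frequency domain,
//
//     smoothed = IFFT( FFT(pdop) .* fft_ma ) / width
//
// which is why __apply_ma multiplies by d_ma and __scale_real divides by n
// after the inverse transform (cuFFT/hipFFT transforms are unnormalized).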
void generate_constants(Dimension4 dim, int ma_count) {
cout << "Generating constants..." << endl;
generate_hamming_coefficients( dim.height, dim.width );
generate_ma_coefficients( dim, ma_count );
}
void prepare_host_arys(Dimension4 idim, Dimension4 sitdim) {
cout << "Preparing host arrays..." << endl;
gpuErrchk( hipHostMalloc((void **) &p_iq, idim.total_size*sizeof( cuFloatComplex )));
result = new float[sitdim.total_size];
}
void prepare_device_arys(Dimension4 idim, Dimension4 odim) {
cout << "Preparing device arrays..." << endl;
gpuErrchk( hipMalloc( &d_hamming, idim.m_size*sizeof( float )));
gpuErrchk( hipMalloc( &d_iq, idim.total_size*sizeof( cuFloatComplex )));
gpuErrchk( hipMalloc( &d_tmp, idim.m_size*sizeof( cuFloatComplex )));
gpuErrchk( hipMalloc( &d_result, odim.total_size*sizeof( float )));
gpuErrchk( hipMemcpy( d_hamming, hamming_coef, idim.m_size*sizeof( float ), hipMemcpyHostToDevice ));
gpuErrchk( hipMemcpyToSymbol( d_ma, fft_ma, idim.width*sizeof( cuFloatComplex ), 0, hipMemcpyHostToDevice ));
}
void prepare_arys(Dimension4 idim, Dimension4 odim, Dimension4 sitdim) {
cout << "Preparing arrays:" << endl;
prepare_host_arys( idim, sitdim );
prepare_device_arys( idim, odim );
}
void initialize_streams(Dimension4 idim, Dimension4 odim) {
cout << "Initializing streams..." << endl;
fft_range_handle = new hipfftHandle[idim.depth];
fft_doppler_handle = new hipfftHandle[idim.depth];
fft_pdop_handle = new hipfftHandle[idim.depth];
int rank = 1; // --- 1D FFTs
int nn[] = { idim.height }; // --- Size of the Fourier transform
int istride = idim.width, // --- Distance between two successive input/output elements
ostride = idim.width;
int idist = 1, odist = 1; // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = idim.width; // --- Number of batched executions
streams = new hipStream_t[idim.depth];
for (int i = 0; i < idim.depth; i++) {
gpuErrchk( hipStreamCreate( &streams[i] ));
hipfftPlanMany( &fft_range_handle[i], rank, nn,
inembed, istride, idist,
onembed, ostride, odist, HIPFFT_C2C, batch );
hipfftPlan1d( &fft_doppler_handle[i], idim.width, HIPFFT_C2C, idim.height );
hipfftPlan1d( &fft_pdop_handle[i], idim.width, HIPFFT_C2C, idim.height/2 );
hipfftSetStream( fft_range_handle[i], streams[i] );
hipfftSetStream( fft_doppler_handle[i], streams[i] );
hipfftSetStream( fft_pdop_handle[i], streams[i] );
}
}
using namespace std::chrono;
uint64_t timeSinceEpochMillisec() {
return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}
void read_matrix(Dimension4 idim, int sector, int elevation, int stream) {
cout << "Reading matrices from network..." << endl;
uint64_t ta = timeSinceEpochMillisec();
// Read envelope with address
string address = s_recv( subscriber );
// Read message contents
string str = s_recv( subscriber );
uint64_t tb = timeSinceEpochMillisec();
cout << "Sector " << sector << ": received " << str.size() << " chars." << endl;
Sector s( idim.height, idim.width );
s.fromByteArray( (char*) str.data() );
uint64_t tc = timeSinceEpochMillisec();
int a, b;
int idx = 0;
#pragma unroll
for (int j = 0; j < idim.height; j++) {
#pragma unroll
for (int i = 0; i < idim.width; i++) {
// cin >> a >> b;
a = idx++;
b = idx++;
p_iq[idim.copy_at_depth( i, j, 0, stream )] = make_cuFloatComplex( s.hh[a], s.hh[b] );
p_iq[idim.copy_at_depth( i, j, 1, stream )] = make_cuFloatComplex( s.vv[a], s.vv[b] );
p_iq[idim.copy_at_depth( i, j, 2, stream )] = make_cuFloatComplex( s.vh[a], s.vh[b] );
}
}
uint64_t td = timeSinceEpochMillisec();
cout << "Msg rcv: " << (tb-ta) << " millis, deserialize: " << (tc-tb) << " millis, restructuring: " << (td-tc) << " millis." << endl;
// for (int j = 0; j < idim.height; j++) {
// for (int i = 0; i < idim.width; i++) {
// int idx = idim.copy_at_depth( i, j, 0, stream );
// cout << "(" << p_iq[idx].x << "," << p_iq[idx].y << ") ";
// }
// cout << endl;
// }
// exit( 0 );
}
void copy_matrix_to_device(Dimension4 idim, int sector, int elevation, int stream) {
cout << "Copying matrices to device..." << endl;
gpuErrchk( hipMemcpyAsync(
&d_iq[idim.copy_at_depth( 0, 0, 0, stream )],
&p_iq[idim.copy_at_depth( 0, 0, 0, stream )],
idim.m_size*idim.copies*sizeof( cuFloatComplex ),
hipMemcpyHostToDevice,
streams[stream] ));
}
void perform_stage_1(Dimension4 idim, int stream) {
cout << "Performing Stage I..." << endl;
int
offset_hh = idim.copy_at_depth( 0, 0, 0, stream ),
offset_vv = idim.copy_at_depth( 0, 0, 1, stream ),
offset_vh = idim.copy_at_depth( 0, 0, 2, stream );
// apply Hamming coefficients
hipLaunchKernelGGL(( __apply_hamming), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, d_hamming, offset_hh );
hipLaunchKernelGGL(( __apply_hamming), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, d_hamming, offset_vv );
hipLaunchKernelGGL(( __apply_hamming), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, d_hamming, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// FFT range profile
hipfftExecC2C( fft_range_handle[stream], &d_iq[offset_hh], &d_iq[offset_hh], HIPFFT_FORWARD );
hipfftExecC2C( fft_range_handle[stream], &d_iq[offset_vv], &d_iq[offset_vv], HIPFFT_FORWARD );
hipfftExecC2C( fft_range_handle[stream], &d_iq[offset_vh], &d_iq[offset_vh], HIPFFT_FORWARD );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// FFT+shift Doppler profile
hipLaunchKernelGGL(( __sum_v4), dim3(idim.height/2), dim3(idim.width), 2*idim.width*sizeof(cuFloatComplex), streams[stream], d_iq, d_tmp, offset_hh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __avgconj), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, d_tmp, offset_hh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __sum_v4), dim3(idim.height/2), dim3(idim.width), 2*idim.width*sizeof(cuFloatComplex), streams[stream], d_iq, d_tmp, offset_vv );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __avgconj), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, d_tmp, offset_vv );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __sum_v4), dim3(idim.height/2), dim3(idim.width), 2*idim.width*sizeof(cuFloatComplex), streams[stream], d_iq, d_tmp, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __avgconj), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, d_tmp, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipfftExecC2C( fft_doppler_handle[stream], &d_iq[offset_hh], &d_iq[offset_hh], HIPFFT_FORWARD );
hipfftExecC2C( fft_doppler_handle[stream], &d_iq[offset_vv], &d_iq[offset_vv], HIPFFT_FORWARD );
hipfftExecC2C( fft_doppler_handle[stream], &d_iq[offset_vh], &d_iq[offset_vh], HIPFFT_FORWARD );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __conjugate), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, offset_hh );
hipLaunchKernelGGL(( __conjugate), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, offset_vv );
hipLaunchKernelGGL(( __conjugate), dim3(idim.height), dim3(idim.width), 0, streams[stream], d_iq, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __shift), dim3(idim.height), dim3(idim.width/2), 0, streams[stream], d_iq, idim.width, offset_hh );
hipLaunchKernelGGL(( __shift), dim3(idim.height), dim3(idim.width/2), 0, streams[stream], d_iq, idim.width, offset_vv );
hipLaunchKernelGGL(( __shift), dim3(idim.height), dim3(idim.width/2), 0, streams[stream], d_iq, idim.width, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __clip_v2), dim3(2), dim3(idim.height), 0, streams[stream], d_iq, idim.width, offset_hh );
hipLaunchKernelGGL(( __clip_v2), dim3(2), dim3(idim.height), 0, streams[stream], d_iq, idim.width, offset_vv );
hipLaunchKernelGGL(( __clip_v2), dim3(2), dim3(idim.height), 0, streams[stream], d_iq, idim.width, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
}
void perform_stage_2(Dimension4 idim, int stream) {
cout << "Performing Stage II..." << endl;
int
offset_hh = idim.copy_at_depth( 0, 0, 0, stream ),
offset_vv = idim.copy_at_depth( 0, 0, 1, stream ),
offset_vh = idim.copy_at_depth( 0, 0, 2, stream );
// Get absolute value squared
hipLaunchKernelGGL(( __abssqr), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_hh );
hipLaunchKernelGGL(( __abssqr), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_vv );
hipLaunchKernelGGL(( __abssqr), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// FFT PDOP
hipfftExecC2C( fft_pdop_handle[stream], &d_iq[offset_hh], &d_iq[offset_hh], HIPFFT_FORWARD );
hipfftExecC2C( fft_pdop_handle[stream], &d_iq[offset_vv], &d_iq[offset_vv], HIPFFT_FORWARD );
hipfftExecC2C( fft_pdop_handle[stream], &d_iq[offset_vh], &d_iq[offset_vh], HIPFFT_FORWARD );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// Apply MA coefficients
hipLaunchKernelGGL(( __apply_ma), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_hh );
hipLaunchKernelGGL(( __apply_ma), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_vv );
hipLaunchKernelGGL(( __apply_ma), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// Inverse FFT
hipfftExecC2C( fft_pdop_handle[stream], &d_iq[offset_hh], &d_iq[offset_hh], HIPFFT_BACKWARD );
hipfftExecC2C( fft_pdop_handle[stream], &d_iq[offset_vv], &d_iq[offset_vv], HIPFFT_BACKWARD );
hipfftExecC2C( fft_pdop_handle[stream], &d_iq[offset_vh], &d_iq[offset_vh], HIPFFT_BACKWARD );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
hipLaunchKernelGGL(( __scale_real), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_hh );
hipLaunchKernelGGL(( __scale_real), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_vv );
hipLaunchKernelGGL(( __scale_real), dim3(idim.height/2), dim3(idim.width), 0, streams[stream], d_iq, offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// Sum
hipLaunchKernelGGL(( __sum_inplace_v4), dim3(idim.height/4), dim3(idim.width), 2*idim.width*sizeof(cuFloatComplex), streams[stream], d_iq,
offset_hh );
hipLaunchKernelGGL(( __sum_inplace_v4), dim3(idim.height/4), dim3(idim.width), 2*idim.width*sizeof(cuFloatComplex), streams[stream], d_iq,
offset_vv );
hipLaunchKernelGGL(( __sum_inplace_v4), dim3(idim.height/4), dim3(idim.width), 2*idim.width*sizeof(cuFloatComplex), streams[stream], d_iq,
offset_vh );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
}
void perform_stage_3(Dimension4 idim, Dimension4 odim, int sector, int elevation, int stream) {
cout << "Performing Stage III..." << endl;
int
offset_hh = idim.copy_at_depth( 0, 0, 0, stream ),
offset_vv = idim.copy_at_depth( 0, 0, 1, stream ),
offset_vh = idim.copy_at_depth( 0, 0, 2, stream );
// Calculate ZdB, Zdr
hipLaunchKernelGGL(( __calcresult_v2), dim3(1), dim3(idim.height/2), 0, streams[stream],
d_iq,
d_result,
idim.width,
offset_hh, offset_vv, offset_vh,
odim.copy_at_depth( 0, 0, 0, stream ));
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
}
void advance(Dimension4 idim) {
cout << "Advancing to next sector..." << endl;
current_sector = (current_sector + 1)%n_sectors;
if (current_sector == 0) {
current_elevation = (current_elevation + 1)%n_elevations;
}
current_stream = (current_stream + 1)%idim.depth;
}
void copy_result_to_host(Dimension4 idim, Dimension4 odim, Dimension4 sitdim, int sector, int elevation, int stream) {
/*
cout << 1 << endl;
cuFloatComplex *dump = new cuFloatComplex[idim.m_size];
cout << 2 << endl;
hipMemcpyAsync(
dump,
&d_iq[idim.copy_at_depth( 0, 0, 0, stream )],
idim.m_size * sizeof( cuFloatComplex ),
hipMemcpyDeviceToHost,
streams[stream] );
cout << 3 << endl;
for (int j = 0; j < idim.height/2; j++) {
for (int i = 0; i < idim.width; i++) {
int idx = idim.copy_at_depth( i, j, 0, 0 );
cout << "(" << dump[idx].x << "," << dump[idx].y << ") ";
}
cout << endl;
}
cout << 4 << endl;
exit( 0 );
*/
cout << "Copying result to host..." << endl;
gpuErrchk( hipMemcpyAsync(
&result[sitdim.copy_at_depth( 0, 0, sector, elevation )],
&d_result[odim.copy_at_depth( 0, 0, 0, stream )],
odim.m_size*sizeof( float ),
hipMemcpyDeviceToHost,
streams[stream] ));
// cout << "zdb:" << endl;
// for (int i=0; i<sitdim.height; i++) {
// cout << result[sitdim.copy_at_depth(0,i,sector,elevation)] << endl;
// }
// exit(0);
}
void send_results(Dimension4 sitdim, int sector, int elevation) {
cout << "Sending results to network..." << endl;
float *zdb = new float[sitdim.height];
float *zdr = new float[sitdim.height];
for (int i = 0; i < sitdim.height; i++) {
zdb[i] = result[sitdim.copy_at_depth( 0, i, sector, elevation )];
zdr[i] = result[sitdim.copy_at_depth( 1, i, sector, elevation )];
}
int buff_size = sizeof( float )*sitdim.height + 4; // + 2 for sector id + 2 for elevation
unsigned char *zdb_buff = new unsigned char[buff_size];
unsigned char *zdr_buff = new unsigned char[buff_size];
zdb_buff[0] = (sector >> 8) & 0xff;
zdb_buff[1] = (sector) & 0xff;
zdb_buff[2] = (elevation >> 8) & 0xff;
zdb_buff[3] = (elevation) & 0xff;
zdr_buff[0] = (sector >> 8) & 0xff;
zdr_buff[1] = (sector) & 0xff;
zdr_buff[2] = (elevation >> 8) & 0xff;
zdr_buff[3] = (elevation) & 0xff;
aftoab( zdb, sitdim.height, &zdb_buff[4] );
aftoab( zdr, sitdim.height, &zdr_buff[4] );
stringstream localStream;
// zdb
localStream.rdbuf()->pubsetbuf( (char*) &zdb_buff[0], buff_size );
std::string str_zdb( localStream.str() );
cout << "Sector " << sector << ": sending ZdB " << str_zdb.size() << " chars...";
s_sendmore( publisher, (std::string) "B" );
s_send( publisher, (std::string) str_zdb );
cout << " Done." << endl;
// zdr
localStream.rdbuf()->pubsetbuf( (char*) &zdr_buff[0], buff_size );
std::string str_zdr( localStream.str() );
cout << "Sector " << sector << ": sending Zdr " << str_zdr.size() << " chars...";
s_sendmore( publisher, (std::string) "C" );
s_send( publisher, (std::string) str_zdr );
cout << " Done." << endl;
}
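// Receiver-side sketch (assumption: mirrors the framing written above --
// two big-endian bytes of sector id, two of elevation, then the float
// payload encoded by aftoab()):
//
//     const unsigned char *buf = ...;          // body of a "B"/"C" message
//     int sector    = (buf[0] << 8) | buf[1];
//     int elevation = (buf[2] << 8) | buf[3];
//     // sitdim.height floats follow at &buf[4]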
void do_process(Dimension4 idim, Dimension4 odim, Dimension4 sitdim) {
cout << "Starting main loop..." << endl;
read_matrix( idim, current_sector, current_elevation, current_stream );
copy_matrix_to_device( idim, current_sector, current_elevation, current_stream );
do {
perform_stage_1( idim, current_stream );
perform_stage_2( idim, current_stream );
perform_stage_3( idim, odim, current_sector, current_elevation, current_stream );
int
prev_sector = current_sector,
prev_elevation = current_elevation,
prev_stream = current_stream;
advance( idim );
read_matrix( idim, current_sector, current_elevation, current_stream );
copy_matrix_to_device( idim, current_sector, current_elevation, current_stream );
copy_result_to_host( idim, odim, sitdim, prev_sector, prev_elevation, prev_stream );
send_results( sitdim, prev_sector, prev_elevation );
} while (true);
}
void destroy_streams(int n) {
cout << "Destroying streams..." << endl;
for (int i = 0; i < n; i++) {
gpuErrchk( hipStreamDestroy( streams[i] ));
}
}
void destroy_device_arys() {
cout << "Destroying device arrays..." << endl;
gpuErrchk( hipFree( d_hamming ));
gpuErrchk( hipFree( d_iq ));
gpuErrchk( hipFree( d_tmp ));
gpuErrchk( hipFree( d_result ));
}
void destroy_host_arys() {
cout << "Destroying host arrays..." << endl;
gpuErrchk( hipHostFree( p_iq ));
delete[] result;
delete[] hamming_coef;
delete[] fft_ma;
delete[] fft_range_handle;
delete[] fft_doppler_handle;
delete[] fft_pdop_handle;
}
void destroy_arrays() {
cout << "Destroying arrays:" << endl;
destroy_device_arys();
destroy_host_arys();
}
int main(int argc, char **argv) {
ios_base::sync_with_stdio( false );
int num_streams = 2;
if (argc > 1) {
num_streams = atoi( argv[1] );
num_streams = num_streams < 2 ? 2 : num_streams;
}
Dimension4 idim( n_samples, n_sweeps, 3, num_streams );
Dimension4 odim( 2, n_sweeps/2, 1, num_streams );
Dimension4 sitdim( 2, n_sweeps/2, n_sectors, n_elevations );
setup_ports();
generate_constants( idim, ma_count );
prepare_arys( idim, odim, sitdim );
initialize_streams( idim, odim );
do_process( idim, odim, sitdim );
destroy_streams( num_streams );
destroy_arrays();
gpuErrchk( hipDeviceReset() );
return 0;
}
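// Usage sketch: the optional first argument selects the number of streams
// (clamped to a minimum of 2), e.g.
//
//     ./radar_pipeline 4      // binary name is illustrative
//
// The process then subscribes to sector data on tcp://localhost:5563 and
// republishes ZdB ("B") and Zdr ("C") frames on tcp://*:5564.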
| abc8a5aa4dd02673d511b31731525855eb1dcdb2.cu | #include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <cuComplex.h>
#include <fftw3.h>
#include <cufft.h>
#include <sys/time.h>
#include <assert.h>
#include <chrono>
#include "sector.h"
#include "floats.h"
#include "dimension.h"
#include <msgpack.hpp>
#include "zhelpers.hpp"
using namespace std;
#define NUM_BYTES_PER_SAMPLE (3*2*2)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
if (code != cudaSuccess) {
fprintf( stderr, "GPUassert: %s %s %d\n", cudaGetErrorString( code ), file, line );
if (abort) exit( code );
}
}
struct sectormsg {
int id;
vector<int>
i_hh, q_hh,
i_vv, q_vv,
i_vh, q_vh;
MSGPACK_DEFINE_MAP( id, i_hh, q_hh, i_vv, q_vv, i_vh, q_vh );
};
const int
n_sectors = 143,
n_sweeps = 1024,
n_samples = 512,
n_elevations = 9;
static const int k_range_resolution = 30;
static constexpr float k_calibration = 1941.05;
static const int ma_count = 7;
int
current_sector = 0,
current_sweep = 0,
current_sample = 0,
current_elevation = 0,
current_stream = 0;
int
hh_index_start,
vv_index_start,
vh_index_start,
input_stream_index_offset;
// host
cuFloatComplex *p_iq;
float *result;
float *hamming_coef;
cuFloatComplex *fft_ma;
// device
float *d_hamming;
cuFloatComplex *d_iq;
cuFloatComplex *d_tmp;
float *d_result;
// cufft
cufftHandle
*fft_range_handle,
*fft_doppler_handle,
*fft_pdop_handle;
cudaStream_t *streams;
zmq::context_t context( 1 );
zmq::socket_t subscriber( context, ZMQ_SUB );
zmq::socket_t publisher( context, ZMQ_PUB );
__constant__ cuFloatComplex d_ma[512];
__global__ void __apply_hamming(cuFloatComplex *a, float *b, int offset) {
const unsigned int idx = threadIdx.x + blockIdx.x*blockDim.x;
a[offset + idx] = make_cuFloatComplex(
b[idx]*cuCrealf( a[offset + idx] ),
b[idx]*cuCimagf( a[offset + idx] ));
}
__global__ void __sum_v4(cuFloatComplex *in, cuFloatComplex *out, int offset) {
const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
sdata[j + n*d] = make_cuFloatComplex(
in[offset + j + i*n + n*d].x,
in[offset + j + i*n + n*d].y );
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
if (j < s) {
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
sdata[j + n*d] = cuCaddf( sdata[j + n*d], sdata[j + n*d + s] );
}
}
__syncthreads();
}
if (j == 0) {
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
out[i*n + n*d] = sdata[j + n*d];
}
}
}
__global__ void __avgconj(cuFloatComplex *inout, cuFloatComplex *sum, int offset) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
float avgx = sum[i*n].x/(float)n;
float avgy = sum[i*n].y/(float)n;
inout[offset + j + i*n] = make_cuFloatComplex( inout[offset + j + i*n].x - avgx,
(inout[offset + j + i*n].y - avgy)*-1 );
}
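// Complex conjugate: flip the sign of the imaginary part of every sample.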
__global__ void __conjugate(cuFloatComplex *a, int offset) {
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
a[offset + idx].y *= -1;
}
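// fftshift along each row of length n: swap the first and second halves so
// the zero-Doppler bin moves to the center (launched with n/2 threads per row).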
__global__ void __shift(cuFloatComplex *inout, int n, int offset) {
const unsigned int i = blockIdx.x, j = threadIdx.x;
cuFloatComplex temp = inout[offset + j + i*n];
inout[offset + j + i*n] = inout[offset + (j + n/2) + i*n];
inout[offset + (j + n/2) + i*n] = temp;
}
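// Zero the last gridDim.x samples of every row (clips the edge Doppler bins).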
__global__ void __clip_v2(cuFloatComplex *inout, int n, int offset) {
const unsigned int i = threadIdx.x, j = n - blockIdx.x - 1;
inout[offset + j + i*n] = make_cuFloatComplex( 0, 0 );
}
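// Replace each sample by its squared magnitude |z|^2, stored in the real part.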
__global__ void __abssqr(cuFloatComplex *inout, int offset) {
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
float real, imag;
real = cuCrealf( inout[offset + idx] );
imag = cuCimagf( inout[offset + idx] );
inout[offset + idx] = make_cuFloatComplex( real*real + imag*imag, 0 );
}
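// Frequency-domain moving-average filter: elementwise multiply each row by
// the FFT of the MA kernel held in constant memory (d_ma).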
__global__ void __apply_ma(cuFloatComplex *inout, int offset) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[offset + j + i*n] = cuCmulf( inout[offset + j + i*n], d_ma[j] );
}
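// Undo the unscaled inverse FFT by dividing by the row length n, keeping
// only the real part.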
__global__ void __scale_real(cuFloatComplex *inout, int offset) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[offset + j + i*n] = make_cuFloatComplex( inout[offset + j + i*n].x/n, 0 );
}
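// Same two-row tree reduction as __sum_v4, but the row sums overwrite the
// first element of each row in place instead of going to a separate array.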
__global__ void __sum_inplace_v4(cuFloatComplex *in, int offset) {
const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
sdata[j + n*d] = make_cuFloatComplex( in[offset + j + i*n + n*d].x, in[offset + j + i*n + n*d].y );
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
if (j < s) {
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
sdata[j + n*d] = cuCaddf( sdata[j + n*d], sdata[j + n*d + s] );
}
}
__syncthreads();
}
if (j == 0) {
#pragma unroll
for (unsigned int d = 0; d < 2; d++) {
in[offset + (i*n + n*d)] = sdata[j + n*d];
}
}
}
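// Per range gate: reflectivity Z = r^2 * calibration * P_hh (range-squared
// correction), ZdB = 10*log10(Z), and differential reflectivity
// Zdr = 10*(log10(P_hh) - log10(P_vv)); ZdB/Zdr pairs are written interleaved.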
__global__ void __calcresult_v2(
cuFloatComplex *iq,
float *out,
int n,
int offset_hh, int offset_vv, int offset_vh,
int result_offset) {
const unsigned int i = threadIdx.x;
float z = pow( i*k_range_resolution, 2.0 )*k_calibration*iq[offset_hh + i*n].x;
float zdb = 10*log10( z );
float zdr = 10*(log10( iq[offset_hh + i*n].x ) - log10( iq[offset_vv + i*n].x ));
out[result_offset + i*2 + 0] = zdb;
out[result_offset + i*2 + 1] = zdr;
}
void setup_ports() {
subscriber.connect( "tcp://localhost:5563" );
subscriber.setsockopt( ZMQ_SUBSCRIBE, "A", 1 );
publisher.bind( "tcp://*:5564" );
}
void generate_hamming_coefficients(int m, int n) {
cout << "Generating Hamming coefficients..." << endl;
// Calculate normalization power on range cell
float p_range = 0;
for (int i = 0; i < m; i++) {
p_range = p_range + pow( 0.53836 - 0.46164*cos( 2*M_PI*(i)/(m - 1)), 2.0 );
}
p_range = p_range/m;
// Calculate normalization power on Doppler cell
float p_doppler = 0;
for (int j = 0; j < n; j++) {
p_doppler = p_doppler + pow( 0.53836 - 0.46164*cos( 2*M_PI*(j)/(n - 1)), 2.0 );
}
p_doppler = p_doppler/n;
// Constant since FFT is not normalized and the power is computed w.r.t. 50 ohm
const float k_wind = -1/(16383.5*m*n*sqrt( 50 ));
const float c = k_wind/sqrt( p_range*p_doppler );
// Generate elements
hamming_coef = new float[m*n];
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
hamming_coef[j + i*n] =
(0.53836 - 0.46164*cos( 2*M_PI*(i)/(m - 1)))*(0.53836 - 0.46164*cos( 2*M_PI*(j)/(n - 1)))*c;
}
}
}
void generate_ma_coefficients(Dimension4 dim, int n) {
cout << "Generating MA coefficients..." << endl;
float *ma_coef = new float[n];
float _sum = 0.0;
for (int i = 0; i < n; i++) {
ma_coef[i] = exp( -(pow( i - ((n - 1)/2), 2.0 ))/2 );
_sum += ma_coef[i];
}
for (int i = 0; i < n; i++) {
ma_coef[i] = ma_coef[i]/_sum;
}
fftwf_complex *_fft_ma = (fftwf_complex *) fftwf_malloc( sizeof( fftwf_complex )*dim.width );
fftwf_plan fft_ma_plan = fftwf_plan_dft_1d( dim.width, _fft_ma, _fft_ma, FFTW_FORWARD, FFTW_ESTIMATE );
for (int j = 0; j < n; j++) {
_fft_ma[j][0] = ma_coef[j];
_fft_ma[j][1] = 0;
}
for (int j = n; j < dim.width; j++) {
_fft_ma[j][0] = 0;
_fft_ma[j][1] = 0;
}
fftwf_execute( fft_ma_plan );
fftwf_destroy_plan( fft_ma_plan );
fft_ma = new cuFloatComplex[dim.width];
for (int j = 0; j < dim.width; j++) {
fft_ma[j] = make_cuFloatComplex( _fft_ma[j][0], _fft_ma[j][1] );
}
fftwf_free( _fft_ma );
}
void generate_constants(Dimension4 dim, int ma_count) {
cout << "Generating constants..." << endl;
generate_hamming_coefficients( dim.height, dim.width );
generate_ma_coefficients( dim, ma_count );
}
void prepare_host_arys(Dimension4 idim, Dimension4 sitdim) {
cout << "Preparing host arrays..." << endl;
gpuErrchk( cudaMallocHost((void **) &p_iq, idim.total_size*sizeof( cuFloatComplex )));
result = new float[sitdim.total_size];
}
void prepare_device_arys(Dimension4 idim, Dimension4 odim) {
cout << "Preparing device arrays..." << endl;
gpuErrchk( cudaMalloc( &d_hamming, idim.m_size*sizeof( float )));
gpuErrchk( cudaMalloc( &d_iq, idim.total_size*sizeof( cuFloatComplex )));
gpuErrchk( cudaMalloc( &d_tmp, idim.m_size*sizeof( cuFloatComplex )));
gpuErrchk( cudaMalloc( &d_result, odim.total_size*sizeof( float )));
gpuErrchk( cudaMemcpy( d_hamming, hamming_coef, idim.m_size*sizeof( float ), cudaMemcpyHostToDevice ));
gpuErrchk( cudaMemcpyToSymbol( d_ma, fft_ma, idim.width*sizeof( cuFloatComplex ), 0, cudaMemcpyHostToDevice ));
}
void prepare_arys(Dimension4 idim, Dimension4 odim, Dimension4 sitdim) {
cout << "Preparing arrays:" << endl;
prepare_host_arys( idim, sitdim );
prepare_device_arys( idim, odim );
}
void initialize_streams(Dimension4 idim, Dimension4 odim) {
cout << "Initializing streams..." << endl;
fft_range_handle = new cufftHandle[idim.depth];
fft_doppler_handle = new cufftHandle[idim.depth];
fft_pdop_handle = new cufftHandle[idim.depth];
int rank = 1; // --- 1D FFTs
int nn[] = { idim.height }; // --- Size of the Fourier transform
int istride = idim.width, // --- Distance between two successive input/output elements
ostride = idim.width;
int idist = 1, odist = 1; // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = idim.width; // --- Number of batched executions
streams = new cudaStream_t[idim.depth];
for (int i = 0; i < idim.depth; i++) {
gpuErrchk( cudaStreamCreate( &streams[i] ));
cufftPlanMany( &fft_range_handle[i], rank, nn,
inembed, istride, idist,
onembed, ostride, odist, CUFFT_C2C, batch );
cufftPlan1d( &fft_doppler_handle[i], idim.width, CUFFT_C2C, idim.height );
cufftPlan1d( &fft_pdop_handle[i], idim.width, CUFFT_C2C, idim.height/2 );
cufftSetStream( fft_range_handle[i], streams[i] );
cufftSetStream( fft_doppler_handle[i], streams[i] );
cufftSetStream( fft_pdop_handle[i], streams[i] );
}
}
using namespace std::chrono;
uint64_t timeSinceEpochMillisec() {
using namespace std::chrono;
return duration_cast<milliseconds>(system_clock::now().time_since_epoch()).count();
}
void read_matrix(Dimension4 idim, int sector, int elevation, int stream) {
cout << "Reading matrices from network..." << endl;
uint64_t ta = timeSinceEpochMillisec();
// Read envelope with address
string address = s_recv( subscriber );
// Read message contents
string str = s_recv( subscriber );
uint64_t tb = timeSinceEpochMillisec();
cout << "Sector " << sector << ": received " << str.size() << " chars." << endl;
Sector s( idim.height, idim.width );
s.fromByteArray( (char*) str.data() );
uint64_t tc = timeSinceEpochMillisec();
int a, b;
int idx = 0;
#pragma unroll
for (int j = 0; j < idim.height; j++) {
#pragma unroll
for (int i = 0; i < idim.width; i++) {
// cin >> a >> b;
a = idx++;
b = idx++;
p_iq[idim.copy_at_depth( i, j, 0, stream )] = make_cuFloatComplex( s.hh[a], s.hh[b] );
p_iq[idim.copy_at_depth( i, j, 1, stream )] = make_cuFloatComplex( s.vv[a], s.vv[b] );
p_iq[idim.copy_at_depth( i, j, 2, stream )] = make_cuFloatComplex( s.vh[a], s.vh[b] );
}
}
uint64_t td = timeSinceEpochMillisec();
cout << "Msg rcv: " << (tb-ta) << " millis, deserialize: " << (tc-tb) << " millis, restructuring: " << (td-tc) << " millis." << endl;
// for (int j = 0; j < idim.height; j++) {
// for (int i = 0; i < idim.width; i++) {
// int idx = idim.copy_at_depth( i, j, 0, stream );
// cout << "(" << p_iq[idx].x << "," << p_iq[idx].y << ") ";
// }
// cout << endl;
// }
// exit( 0 );
}
void copy_matrix_to_device(Dimension4 idim, int sector, int elevation, int stream) {
cout << "Copying matrices to device..." << endl;
gpuErrchk( cudaMemcpyAsync(
&d_iq[idim.copy_at_depth( 0, 0, 0, stream )],
&p_iq[idim.copy_at_depth( 0, 0, 0, stream )],
idim.m_size*idim.copies*sizeof( cuFloatComplex ),
cudaMemcpyHostToDevice,
streams[stream] ));
}
void perform_stage_1(Dimension4 idim, int stream) {
cout << "Performing Stage I..." << endl;
int
offset_hh = idim.copy_at_depth( 0, 0, 0, stream ),
offset_vv = idim.copy_at_depth( 0, 0, 1, stream ),
offset_vh = idim.copy_at_depth( 0, 0, 2, stream );
// apply Hamming coefficients
__apply_hamming<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, d_hamming, offset_hh );
__apply_hamming<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, d_hamming, offset_vv );
__apply_hamming<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, d_hamming, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// FFT range profile
cufftExecC2C( fft_range_handle[stream], &d_iq[offset_hh], &d_iq[offset_hh], CUFFT_FORWARD );
cufftExecC2C( fft_range_handle[stream], &d_iq[offset_vv], &d_iq[offset_vv], CUFFT_FORWARD );
cufftExecC2C( fft_range_handle[stream], &d_iq[offset_vh], &d_iq[offset_vh], CUFFT_FORWARD );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// FFT+shift Doppler profile
__sum_v4<<<idim.height/2, idim.width, 2*idim.width*sizeof(cuFloatComplex), streams[stream]>>>( d_iq, d_tmp, offset_hh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__avgconj<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, d_tmp, offset_hh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__sum_v4<<<idim.height/2, idim.width, 2*idim.width*sizeof(cuFloatComplex), streams[stream]>>>( d_iq, d_tmp, offset_vv );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__avgconj<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, d_tmp, offset_vv );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__sum_v4<<<idim.height/2, idim.width, 2*idim.width*sizeof(cuFloatComplex), streams[stream]>>>( d_iq, d_tmp, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__avgconj<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, d_tmp, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
cufftExecC2C( fft_doppler_handle[stream], &d_iq[offset_hh], &d_iq[offset_hh], CUFFT_FORWARD );
cufftExecC2C( fft_doppler_handle[stream], &d_iq[offset_vv], &d_iq[offset_vv], CUFFT_FORWARD );
cufftExecC2C( fft_doppler_handle[stream], &d_iq[offset_vh], &d_iq[offset_vh], CUFFT_FORWARD );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__conjugate<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, offset_hh );
__conjugate<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, offset_vv );
__conjugate<<<idim.height, idim.width, 0, streams[stream]>>>( d_iq, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__shift<<<idim.height, idim.width/2, 0, streams[stream]>>>( d_iq, idim.width, offset_hh );
__shift<<<idim.height, idim.width/2, 0, streams[stream]>>>( d_iq, idim.width, offset_vv );
__shift<<<idim.height, idim.width/2, 0, streams[stream]>>>( d_iq, idim.width, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__clip_v2<<<2, idim.height, 0, streams[stream]>>>( d_iq, idim.width, offset_hh );
__clip_v2<<<2, idim.height, 0, streams[stream]>>>( d_iq, idim.width, offset_vv );
__clip_v2<<<2, idim.height, 0, streams[stream]>>>( d_iq, idim.width, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
void perform_stage_2(Dimension4 idim, int stream) {
cout << "Performing Stage II..." << endl;
int
offset_hh = idim.copy_at_depth( 0, 0, 0, stream ),
offset_vv = idim.copy_at_depth( 0, 0, 1, stream ),
offset_vh = idim.copy_at_depth( 0, 0, 2, stream );
// Get absolute value squared
__abssqr<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_hh );
__abssqr<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_vv );
__abssqr<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// FFT PDOP
cufftExecC2C( fft_pdop_handle[stream], &d_iq[offset_hh], &d_iq[offset_hh], CUFFT_FORWARD );
cufftExecC2C( fft_pdop_handle[stream], &d_iq[offset_vv], &d_iq[offset_vv], CUFFT_FORWARD );
cufftExecC2C( fft_pdop_handle[stream], &d_iq[offset_vh], &d_iq[offset_vh], CUFFT_FORWARD );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Apply MA coefficients
__apply_ma<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_hh );
__apply_ma<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_vv );
__apply_ma<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Inverse FFT
cufftExecC2C( fft_pdop_handle[stream], &d_iq[offset_hh], &d_iq[offset_hh], CUFFT_INVERSE );
cufftExecC2C( fft_pdop_handle[stream], &d_iq[offset_vv], &d_iq[offset_vv], CUFFT_INVERSE );
cufftExecC2C( fft_pdop_handle[stream], &d_iq[offset_vh], &d_iq[offset_vh], CUFFT_INVERSE );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
__scale_real<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_hh );
__scale_real<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_vv );
__scale_real<<<idim.height/2, idim.width, 0, streams[stream]>>>( d_iq, offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Sum
__sum_inplace_v4<<<idim.height/4, idim.width, 2*idim.width*sizeof(cuFloatComplex), streams[stream]>>>( d_iq,
offset_hh );
__sum_inplace_v4<<<idim.height/4, idim.width, 2*idim.width*sizeof(cuFloatComplex), streams[stream]>>>( d_iq,
offset_vv );
__sum_inplace_v4<<<idim.height/4, idim.width, 2*idim.width*sizeof(cuFloatComplex), streams[stream]>>>( d_iq,
offset_vh );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
void perform_stage_3(Dimension4 idim, Dimension4 odim, int sector, int elevation, int stream) {
cout << "Performing Stage III..." << endl;
int
offset_hh = idim.copy_at_depth( 0, 0, 0, stream ),
offset_vv = idim.copy_at_depth( 0, 0, 1, stream ),
offset_vh = idim.copy_at_depth( 0, 0, 2, stream );
// Calculate ZdB, Zdr
__calcresult_v2<<<1, idim.height/2, 0, streams[stream]>>>(
d_iq,
d_result,
idim.width,
offset_hh, offset_vv, offset_vh,
odim.copy_at_depth( 0, 0, 0, stream ));
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
}
void advance(Dimension4 idim) {
cout << "Advancing to next sector..." << endl;
current_sector = (current_sector + 1)%n_sectors;
if (current_sector == 0) {
current_elevation = (current_elevation + 1)%n_elevations;
}
current_stream = (current_stream + 1)%idim.depth;
}
void copy_result_to_host(Dimension4 idim, Dimension4 odim, Dimension4 sitdim, int sector, int elevation, int stream) {
/*
cout << 1 << endl;
cuFloatComplex *dump = new cuFloatComplex[idim.m_size];
cout << 2 << endl;
cudaMemcpyAsync(
dump,
&d_iq[idim.copy_at_depth( 0, 0, 0, stream )],
idim.m_size * sizeof( cuFloatComplex ),
cudaMemcpyDeviceToHost,
streams[stream] );
cout << 3 << endl;
for (int j = 0; j < idim.height/2; j++) {
for (int i = 0; i < idim.width; i++) {
int idx = idim.copy_at_depth( i, j, 0, 0 );
cout << "(" << dump[idx].x << "," << dump[idx].y << ") ";
}
cout << endl;
}
cout << 4 << endl;
exit( 0 );
*/
cout << "Copying result to host..." << endl;
gpuErrchk( cudaMemcpyAsync(
&result[sitdim.copy_at_depth( 0, 0, sector, elevation )],
&d_result[odim.copy_at_depth( 0, 0, 0, stream )],
odim.m_size*sizeof( float ),
cudaMemcpyDeviceToHost,
streams[stream] ));
// cout << "zdb:" << endl;
// for (int i=0; i<sitdim.height; i++) {
// cout << result[sitdim.copy_at_depth(0,i,sector,elevation)] << endl;
// }
// exit(0);
}
void send_results(Dimension4 sitdim, int sector, int elevation) {
cout << "Sending results to network..." << endl;
float *zdb = new float[sitdim.height];
float *zdr = new float[sitdim.height];
for (int i = 0; i < sitdim.height; i++) {
zdb[i] = result[sitdim.copy_at_depth( 0, i, sector, elevation )];
zdr[i] = result[sitdim.copy_at_depth( 1, i, sector, elevation )];
}
int buff_size = sizeof( float )*sitdim.height + 4; // + 2 for sector id + 2 for elevation
unsigned char *zdb_buff = new unsigned char[buff_size];
unsigned char *zdr_buff = new unsigned char[buff_size];
zdb_buff[0] = (sector >> 8) & 0xff;
zdb_buff[1] = (sector) & 0xff;
zdb_buff[2] = (elevation >> 8) & 0xff;
zdb_buff[3] = (elevation) & 0xff;
zdr_buff[0] = (sector >> 8) & 0xff;
zdr_buff[1] = (sector) & 0xff;
zdr_buff[2] = (elevation >> 8) & 0xff;
zdr_buff[3] = (elevation) & 0xff;
aftoab( zdb, sitdim.height, &zdb_buff[4] );
aftoab( zdr, sitdim.height, &zdr_buff[4] );
stringstream localStream;
// zdb
localStream.rdbuf()->pubsetbuf( (char*) &zdb_buff[0], buff_size );
std::string str_zdb( localStream.str() );
cout << "Sector " << sector << ": sending ZdB " << str_zdb.size() << " chars...";
s_sendmore( publisher, (std::string) "B" );
s_send( publisher, (std::string) str_zdb );
cout << " Done." << endl;
// zdr
localStream.rdbuf()->pubsetbuf( (char*) &zdr_buff[0], buff_size );
std::string str_zdr( localStream.str() );
cout << "Sector " << sector << ": sending Zdr " << str_zdr.size() << " chars...";
s_sendmore( publisher, (std::string) "C" );
s_send( publisher, (std::string) str_zdr );
cout << " Done." << endl;
}
void do_process(Dimension4 idim, Dimension4 odim, Dimension4 sitdim) {
cout << "Starting main loop..." << endl;
read_matrix( idim, current_sector, current_elevation, current_stream );
copy_matrix_to_device( idim, current_sector, current_elevation, current_stream );
do {
perform_stage_1( idim, current_stream );
perform_stage_2( idim, current_stream );
perform_stage_3( idim, odim, current_sector, current_elevation, current_stream );
int
prev_sector = current_sector,
prev_elevation = current_elevation,
prev_stream = current_stream;
advance( idim );
read_matrix( idim, current_sector, current_elevation, current_stream );
copy_matrix_to_device( idim, current_sector, current_elevation, current_stream );
copy_result_to_host( idim, odim, sitdim, prev_sector, prev_elevation, prev_stream );
send_results( sitdim, prev_sector, prev_elevation );
} while (true);
}
void destroy_streams(int n) {
cout << "Destroying streams..." << endl;
for (int i = 0; i < n; i++) {
gpuErrchk( cudaStreamDestroy( streams[i] ));
}
}
void destroy_device_arys() {
cout << "Destroying device arrays..." << endl;
gpuErrchk( cudaFree( d_hamming ));
gpuErrchk( cudaFree( d_iq ));
gpuErrchk( cudaFree( d_tmp ));
gpuErrchk( cudaFree( d_result ));
}
void destroy_host_arys() {
cout << "Destroying host arrays..." << endl;
gpuErrchk( cudaFreeHost( p_iq ));
delete[] result;
delete[] hamming_coef;
delete[] fft_ma;
delete[] fft_range_handle;
delete[] fft_doppler_handle;
delete[] fft_pdop_handle;
}
void destroy_arrays() {
cout << "Destroying arrays:" << endl;
destroy_device_arys();
destroy_host_arys();
}
int main(int argc, char **argv) {
ios_base::sync_with_stdio( false );
int num_streams = 2;
if (argc > 1) {
num_streams = atoi( argv[1] );
num_streams = num_streams < 2 ? 2 : num_streams;
}
Dimension4 idim( n_samples, n_sweeps, 3, num_streams );
Dimension4 odim( 2, n_sweeps/2, 1, num_streams );
Dimension4 sitdim( 2, n_sweeps/2, n_sectors, n_elevations );
setup_ports();
generate_constants( idim, ma_count );
prepare_arys( idim, odim, sitdim );
initialize_streams( idim, odim );
do_process( idim, odim, sitdim );
destroy_streams( num_streams );
destroy_arrays();
gpuErrchk( cudaDeviceReset() );
return 0;
}
|
0fd6792a7086fad1d4b252b4d024dd607e3b0bcb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file fwt_new.cu
*
* @author Milosz Ciznicki
*/
/**
* @file fwt.cu
*
* @author Milosz Ciznicki
*/
extern "C" {
#include "fwt_new.h"
}
/**
* @defgroup 97Coeff 97 Coefficients.
*
* 97 Coefficients.
*
* @{
*/
const float a1 = -1.586134342f;
const float a2 = -0.05298011854f;
const float a3 = 0.8829110762f;
const float a4 = 0.4435068522f;
/** @} */
/**
* @defgroup ScaleCoeff Scale coefficients.
*
* Scale coefficients.
*
* @{
*/
const float k = 1.230174104914f; // 1.230174104914
/** @} */
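/*
 * Lifting ladder implemented below (9/7 analysis): predict with a1 on odd
 * samples, update with a2 on even samples, predict with a3, update with a4;
 * the low-pass output is then scaled by 1/k and the high-pass output by k.
 */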
/**
* @defgroup 53Coeff 53 coefficients.
*
* 53 coefficients.
*
* @{
*/
const float p53 = -0.5f;
const float u53 = 0.25f;
/** @} */
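/*
 * Integer 5/3 analysis as implemented in the kernels below (arithmetic >>
 * acts as a floor division):
 *   d[n] = x[2n+1] - ((x[2n] + x[2n+2]) >> 1)      predict
 *   s[n] = x[2n]   + ((d[n-1] + d[n] + 2) >> 2)    update
 */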
/**
* @brief Performs one lifting step (predict or update) over the pixel neighborhood.
*
* @param a Coefficient.
* @param pix_neighborhood Array storing neighbor pixels.
*/
template <class T, unsigned int start, unsigned int end>
__device__
void process_new(const float a, T *pix_neighborhood)
{
#pragma unroll
for(int i = start; i <= end; i+=2)
{
pix_neighborhood[i] += a * (pix_neighborhood[i-1] + pix_neighborhood[i+1]);
}
}
/**
* @brief Performs one integer lifting step over the pixel neighborhood.
*
* @param a Coefficient.
* @param pix_neighborhood Array storing neighbor pixels.
*/
template <class T, unsigned int start, unsigned int end>
__device__
void process53_new(const int sign, const int approx, const int a, T *pix_neighborhood)
{
#pragma unroll
for(int i = start; i <= end; i+=2)
{
pix_neighborhood[i] += sign * ((pix_neighborhood[i-1] + pix_neighborhood[i+1] + approx) >> a);
}
}
/**
* @brief Saves results to temporary array.
*
* @param p_offset_y Row number actually being processed
* @param results Array containing temporary results.
* @param pix_neighborhood Array storing neighbor pixels.
*/
template <class T, unsigned int n, int even, int odd>
__device__
void save_part_results_new(int p_offset_y, T *results, T *pix_neighborhood)
{
#pragma unroll
for(int i = 0; i < n; i++)
{
if(p_offset_y == i)
{
results[2*i] = pix_neighborhood[even]; // even - low-pass sample - a1->a2->a3->a4
results[2*i + 1] = pix_neighborhood[odd]; // odd - high-pass sample - a1->a2->a3
}
}
}
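/**
 * @brief Saves computed results to shared memory, deinterleaving along a row.
 *
 * Row-oriented counterpart of save_to_shared_new: the low-pass sample (scaled
 * by 1/k) is written at column tid.x and the high-pass sample (scaled by k) at
 * column tid.x + p_offset.x of the same row.
 *
 * @param k Scale coefficient.
 * @param tid Thread id.
 * @param p_offset Offset in shared memory.
 * @param p_size_x Computed block width.
 * @param results Array containing computed results.
 * @param shared Shared memory.
 */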
template <class T, unsigned int n>
__device__
void save_to_shared2_new(float k, short2 tid, short2 p_offset, int p_size_x, T *results, T shared[][MEMSIZE + 1])
{
#pragma unroll
for(int i = 0; i < n; i++)
{
if(p_offset.y == i)
{
shared[tid.y][tid.x] = results[2*i] / k;
if(tid.x + p_offset.x < p_size_x)// p_size_y
shared[tid.y][tid.x + p_offset.x] = k * results[2*i + 1];
}
}
}
/**
* @brief Saves computed results to shared memory.
*
* @param k Scale coefficient.
* @param tid Thread id.
* @param p_offset Offset in shared memory.
* @param p_size_x Computed block width.
* @param results Array containing computed results.
* @param shared Shared memory.
*/
template <class T, unsigned int n>
__device__
void save_to_shared_new(float k, short2 tid, short2 p_offset, int p_size_x, T *results, T shared[][MEMSIZE + 1])
{
#pragma unroll
for(int i = 0; i < n; i++)
{
if(p_offset.y == i)
{
shared[tid.y][tid.x] = results[2*i] / k;
if(tid.y + p_offset.x < p_size_x)// p_size_y
shared[tid.y + p_offset.x][tid.x] = k * results[2*i + 1];
}
}
}
/**
* @brief Computes forward 97 lifting process and saves results to shared memory.
*
* @param tidy Thread y id.
* @param tidx2 Even thread x id.
* @param p_offset_y Row number actually being processed.
* @param pix_neighborhood Array storing neighbor pixels.
* @param shared Shared memory.
* @param results Array containing computed results.
*/
__device__
void fprocess_97_new(short tidy, const short tidx2, short p_offset_y, float *pix_neighborhood, const float shared[][MEMSIZE + 1],
float *results)
{
// Read necessary data
#pragma unroll
for (int i = 0; i < 9; i++)
{// pragma unroll
pix_neighborhood[i] = shared[tidy][tidx2 + i - 4 + OFFSET_97];
}
// Predict 1
process_new<float, 1, 7> (a1, pix_neighborhood);
// Update 1
process_new<float, 2, 6> (a2, pix_neighborhood);
// Predict 2
process_new<float, 3, 5> (a3, pix_neighborhood);
// Update 2
process_new<float, 4, 4> (a4, pix_neighborhood);
// Can not dynamically index registers, avoid local memory usage.
// results[0 + p_offset_y * 2] = pix_neighborhood[4];
// results[1 + p_offset_y * 2] = pix_neighborhood[5];
save_part_results_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 4, 5> (p_offset_y, results, pix_neighborhood);
}
/**
* @brief Computes forward 53 lifting process and saves results to shared memory.
*
* @param tidy Thread y id.
* @param tidx2 Even thread x id.
* @param p_offset_y Row number actually being processed.
* @param pix_neighborhood Array storing neighbor pixels.
* @param shared Shared memory.
* @param results Array containing computed results.
*/
__device__
void fprocess_53_2_new(short tidy, const short tidx2, short p_offset_y, int *pix_neighborhood, const int shared[][MEMSIZE + 1],
int *results)
{
// Read necessary data
#pragma unroll
for (int i = 0; i < 5; i++)
{
pix_neighborhood[i] = shared[tidy][tidx2 + i - 2 + OFFSET_53];
}
// Predict 1
// process53<int, 1, 3> (-1, 0, 1, pix_neighborhood);
pix_neighborhood[1] -= ((pix_neighborhood[0] + pix_neighborhood[2]) >> 1);
pix_neighborhood[3] -= ((pix_neighborhood[2] + pix_neighborhood[4]) >> 1);
// Update 1
// process53<int, 2, 2> (1, 2, 2, pix_neighborhood);
pix_neighborhood[2] += ((pix_neighborhood[1] + pix_neighborhood[3] + 2) >> 2);
// Can not dynamically index registers, avoid local memory usage.
// results[0 + p_offset_y * 2] = pix_neighborhood[4];
// results[1 + p_offset_y * 2] = pix_neighborhood[5];
save_part_results_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 2, 3> (p_offset_y, results, pix_neighborhood);
}
/**
* @brief Computes forward 53 lifting process and saves results to shared memory.
*
* @param tidy Thread y id.
* @param tidx2 Even thread x id.
* @param p_offset_y Row number actually being processed.
* @param pix_neighborhood Array storing neighbor pixels.
* @param shared Shared memory.
* @param results Array containing computed results.
*/
__device__
void fprocess_53_new(short tidy, const short tidx2, short p_offset_y, int *pix_neighborhood, const int shared[][MEMSIZE + 1],
int *results)
{
// Read necessary data
#pragma unroll
for (int i = 0; i < 5; i++)
{
pix_neighborhood[i] = shared[tidy][tidx2 + i - 2 + OFFSET_53];
}
// Predict 1
process_new<int, 1, 3> (p53, pix_neighborhood);
// Update 1
process_new<int, 2, 2> (u53, pix_neighborhood);
// Can not dynamically index registers, avoid local memory usage.
// results[0 + p_offset_y * 2] = pix_neighborhood[4];
// results[1 + p_offset_y * 2] = pix_neighborhood[5];
save_part_results_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 2, 3> (p_offset_y, results, pix_neighborhood);
}
/**
* @brief Reads data from global memory to shared memory.
*
* Data from global memory is read with an additional margin containing neighboring pixels. The margin width depends on the offset variable.
*
* @param tid Thread id.
* @param bid Block id.
* @param p_size Block width and height.
* @param img_size Input image width and height.
* @param step_x Output image width.
* @param idata Input array.
* @param shared Shared memory
* @param results Temporary array.
* @param offset Margin width.
*/
template<class T>
__device__
void read_data_new(short2 tid, const int2 bid, const short2 p_size, const int2 img_size, const int step_x, const float *idata,
T shared[][MEMSIZE + 1], T *results, const int offset)
{
// Threads offset to read margins
short2 p_offset;
// Left and top offset
// If first block in row, compute left offset to symmetric extension.
const short p_l_offset_x =
((bid.x == FIRST_BLOCK) ? (offset - tid.x) /* left symmetric extension*/: -offset + tid.x /* take from previous block */);
// If first block in column, compute top offset to symmetric extension.
const short p_t_offset_y =
((bid.y == FIRST_BLOCK) ? (offset - tid.y) /* top symmetric extension*/: -offset + tid.y /* take from previous block */);
// Read patch from GM to SM
while (tid.y < p_size.y + 2 * offset)
{
while (tid.x < p_size.x + 2 * offset)
{
// First offset threads do symmetric extension.
p_offset.x = ((tid.x < offset) ? p_l_offset_x /* take from left adjacent block */: -offset + tid.x /* take normally pixels */);
p_offset.y = ((tid.y < offset) ? p_t_offset_y /* take from top adjacent block */: -offset + tid.y /* take normally pixels */);
// Take as many pixels as it is possible from right side
results[2] = ((tid.x - offset < img_size.x - bid.x) ? (tid.x - offset) /* Take pixels from next block */: ((img_size.x - bid.x
+ (img_size.x - bid.x - p_size.x) - 2) - (tid.x - (p_size.x + offset)))) /* Take missing pixel by doing symmetric extensions */;
// If there are less than offset pixels on bottom side
results[3] = ((tid.y - offset < img_size.y - bid.y) ? (tid.y - offset) /* Take pixels from next block */: ((img_size.y - bid.y
+ (img_size.y - bid.y - p_size.y) - 2) - (tid.y - (p_size.y + offset)))) /* Take missing pixel by doing symmetric extensions */;
// // If next to last block in row, compute right offset to symmetric extension.
// results[2] = ((img_size.x - (bid.x + PATCHX) < offset) ? ((img_size.x - bid.x - 2) - (tid.x - (p_size.x + offset))) /* Take missing pixel by doing symmetric extensions */ : tid.x - offset /* Take pixels from next block */);
// // If next to last block in column, compute bottom offset to symmetric extension.
// results[3] = ((img_size.y - (bid.y + PATCHY) < offset) ? ((img_size.y - bid.y - 2) - (tid.y - (p_size.y + offset))) /* Take missing pixel by doing symmetric extensions */ : tid.y - offset /* Take pixels from next block */);
// // If next to last block in row, compute right offset to symmetric extension.
// results[2] = ((img_size.x - (bid.x + PATCHX) < offset) ? ((img_size.x - bid.x - 1) - (tid.x - (p_size.x + offset))) : tid.x - offset /* Take pixels from next block */);
// // If next to last block in column, compute bottom offset to symmetric extension.
// results[3] = ((img_size.y - (bid.y + PATCHY) < offset) ? ((img_size.y - bid.y - 1) - (tid.y - (p_size.y + offset))) : tid.y - offset /* Take pixels from next block */);
// If next to last block in row, compute right offset to symmetric extension.
results[2] = ((img_size.x - (bid.x + PATCHX) < offset) ? results[2] : tid.x - offset /* Take pixels from next block */);
// If next to last block in column, compute bottom offset to symmetric extension.
results[3] = ((img_size.y - (bid.y + PATCHY) < offset) ? results[3] : tid.y - offset /* Take pixels from next block */);
// // If next to last block in row, compute right offset to symmetric extension.
// results[2] = ((img_size.x - bid.x < PATCHX + 4) ? results[2] : tid.x - offset /* Take pixels from next block */);
// // If next to last block in column, compute bottom offset to symmetric extension.
// results[3] = ((img_size.y - bid.y < PATCHY + 4) ? results[3] : tid.y - offset /* Take pixels from next block */);
// If last block in row, compute right offset to symmetric extension.
results[2] = ((img_size.x - bid.x < PATCHX + 1) ? ((img_size.x - bid.x - 2) - (tid.x - (p_size.x + offset))) /* Symmetric extension 0 1 2 3 | 2 1 0 */
: results[2]);
// If last block in column, compute bottom offset to symmetric extension.
results[3] = ((img_size.y - bid.y < PATCHY + 1) ? ((img_size.y - bid.y - 2) - (tid.y - (p_size.y + offset))) /* Symmetric extension 0 1 2 3 | 2 1 0 */
: results[3]);
// Last threads do symmetric extension.
p_offset.x = ((tid.x >= p_size.x + offset) ? results[2] : p_offset.x);
p_offset.y = ((tid.y >= p_size.y + offset) ? results[3] : p_offset.y);
shared[tid.y][tid.x] = idata[bid.x + p_offset.x + (bid.y + p_offset.y) * step_x];
tid.x += BLOCKSIZEX;
}
tid.x = threadIdx.x;
tid.y += BLOCKSIZEY;
}
}
/**
* @brief Saves data from shared memory to global memory.
*
* @param tid Thread id.
* @param p_offset X and y offset in shared memory.
* @param p_size Width and height of the computed block.
* @param img_size Image width and height.
* @param step_x Output image width.
* @param odata Output array.
* @param shared Shared memory.
*/
template<class T>
__device__
void save_data_new(short2 tid, short2 p_offset, const short2 p_size, const int2 img_size, const int step_x, float *odata,
T shared[][MEMSIZE + 1])
{
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Column offset
p_offset.x = (int) ceilf(p_size.x / 2.0f);
// Row offset
p_offset.y = (int) ceilf(p_size.y / 2.0f);
// Save to GM
while (tid.y < p_offset.y)
{
//if(tid.x + blockIdx.x * BLOCKSIZEX < img_size.x && tid.y + blockIdx.y * BLOCKSIZEY < img_size.y)
while (tid.x < p_offset.x)
{
odata[tid.x + blockIdx.x * PATCHX_DIV_2 + (tid.y + blockIdx.y * PATCHY_DIV_2) * step_x] = shared[tid.y][tid.x];
if (tid.x + (int) ceilf(img_size.x / 2.0f) + blockIdx.x * PATCHX_DIV_2 < img_size.x)
odata[tid.x + (int) ceilf(img_size.x / 2.0f) + blockIdx.x * PATCHX_DIV_2 + (tid.y + blockIdx.y * PATCHY_DIV_2) * step_x]
= shared[tid.y][tid.x + p_offset.x];
if (tid.y + (int) ceilf(img_size.y / 2.0f) + blockIdx.y * PATCHY_DIV_2 < img_size.y)
odata[tid.x + blockIdx.x * PATCHX_DIV_2 + (tid.y + (int) ceilf(img_size.y / 2.0f) + blockIdx.y * PATCHY_DIV_2) * step_x]
= shared[tid.y + p_offset.y][tid.x];
if (tid.x + (int) ceilf(img_size.x / 2.0f) + blockIdx.x * PATCHX_DIV_2 < img_size.x && tid.y + (int) ceilf(img_size.y / 2.0f)
+ blockIdx.y * PATCHY_DIV_2 < img_size.y)
odata[tid.x + (int) ceilf(img_size.x / 2.0f) + blockIdx.x * PATCHX_DIV_2 + (tid.y + (int) ceilf(img_size.y / 2.0f)
+ blockIdx.y * PATCHY_DIV_2) * step_x] = shared[tid.y + p_offset.y][tid.x + p_offset.x];
tid.x += BLOCKSIZEX;
}
tid.x = threadIdx.x;
tid.y += BLOCKSIZEY;
}
}
/**
* @brief Computes forward wavelet transform 97.
*
* @param idata Input data.
* @param odata Output data
* @param img_size Struct with input image width and height.
* @param step Struct with output image width and height.
*/
__global__
void fwt97_new(const float *idata, float *odata, const int2 img_size, const int2 step)
{
/* Shared memory for part of the signal */
__shared__ float shared[MEMSIZE][MEMSIZE + 1];
/* Input x, y block dimension */
const int2 bid = make_int2(blockIdx.x * PATCHX, blockIdx.y * PATCHY);
/* Thread id */
short2 tid = make_short2(threadIdx.x, threadIdx.y);
/* Threads offset to read margins */
short2 p_offset;
// Patch size
/* Compute patch offset and size */
const short2 p_size = make_short2(img_size.x - bid.x < PATCHX ? img_size.x - bid.x : PATCHX, img_size.y - bid.y < PATCHY ? img_size.y
- bid.y : PATCHY);
/* Even thread id */
// const short tidx2 = threadIdx.x * 2;
/* Allocate registers in order to compute even and odd pixels. */
float pix_neighborhood[9];
/* Minimize registers usage. Right | bottom offset. Odd | even result pixels. */
float results[((MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY) * 2];
read_data_new<float>(tid, bid, p_size, img_size, step.x, idata, shared, results, OFFSET_97);
__syncthreads();
// Thread x id
tid.x = threadIdx.x;
// Thread y id
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Process rows
while (tid.y * 2 < p_size.y && tid.x < p_size.x + 2 * OFFSET_97)
{
// fprocess_97(tid.y, tidx2, p_offset.y, pix_neighborhood, shared, results);
// Read necessary data
#pragma unroll
for (int i = 0; i < 9; i++)
{// pragma unroll
pix_neighborhood[i] = shared[tid.y * 2 + i - 4 + OFFSET_97][tid.x];
}
// Predict 1
process_new<float, 1, 7> (a1, pix_neighborhood);
// Update 1
process_new<float, 2, 6> (a2, pix_neighborhood);
// Predict 2
process_new<float, 3, 5> (a3, pix_neighborhood);
// Update 2
process_new<float, 4, 4> (a4, pix_neighborhood);
// Can not dynamically index registers, avoid local memory usage.
// results[0 + p_offset_y * 2] = pix_neighborhood[4];
// results[1 + p_offset_y * 2] = pix_neighborhood[5];
save_part_results_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 4, 5> (p_offset.y, results, pix_neighborhood);
p_offset.y++;
tid.x += BLOCKSIZEY;
}
// // Process rows
// while (tid.y < p_size.y + 2 * OFFSET_97 && tidx2 < p_size.x)
// {
// fprocess_97(tid.y, tidx2, p_offset.y, pix_neighborhood, shared, results);
//
// p_offset.y++;
// tid.y += BLOCKSIZEY;
// }
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
p_offset.y = 0;
// Column offset
p_offset.x = (int) ceilf(p_size.y / 2.0f);
// save results and rotate
while (tid.y < p_size.y && tid.x < p_size.x + 2 * OFFSET_97)
{
// Can not dynamically index registers, avoid local memory usage.
// shared[tid.x][tid.y] = k2 * results[0 + p_offset.y * 2];
// if(tid.x + BLOCKSIZEX < p_size.y)
// shared[tid.x + BLOCKSIZEX][tid.y] = k1 * results[1 + p_offset.y * 2];
save_to_shared_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY> (k, tid, p_offset, p_size.y, results, shared);
p_offset.y++;
tid.x += BLOCKSIZEY;
}
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Process columns
while (tid.y < p_size.y && tid.x * 2 < p_size.x)
{
// fprocess_97(tid.y, tidx2, p_offset.y, pix_neighborhood, shared, results);
// Read necessary data
#pragma unroll
for (int i = 0; i < 9; i++)
{// pragma unroll
pix_neighborhood[i] = shared[tid.y][tid.x * 2 + i - 4 + OFFSET_97];
}
// Predict 1
process_new<float, 1, 7> (a1, pix_neighborhood);
// Update 1
process_new<float, 2, 6> (a2, pix_neighborhood);
// Predict 2
process_new<float, 3, 5> (a3, pix_neighborhood);
// Update 2
process_new<float, 4, 4> (a4, pix_neighborhood);
save_part_results_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 4, 5> (p_offset.y, results, pix_neighborhood);
p_offset.y++;
tid.y += BLOCKSIZEY;
}
// // Process columns
// while (tid.y < p_size.x && tidx2 < p_size.y)
// {
// fprocess_97(tid.y, tidx2, p_offset.y, pix_neighborhood, shared, results);
//
// p_offset.y++;
// tid.y += BLOCKSIZEY;
// }
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Row offset
p_offset.x = (int) ceilf(p_size.x / 2.0f);
// Save results and rotate
while (tid.y < p_size.y && tid.x < p_size.x)
{
// Can not dynamically index registers, avoid local memory usage.
// shared[tid.x][tid.y] = k2 * results[0 + p_offset.y * 2];
// if(tid.x + BLOCKSIZEX < p_size.y)
// shared[tid.x + BLOCKSIZEX][tid.y] = k1 * results[1 + p_offset.y * 2];
save_to_shared2_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY> (k, tid, p_offset, p_size.x, results, shared);
p_offset.y++;
tid.y += BLOCKSIZEY;
}
__syncthreads();
save_data_new<float>(tid, p_offset, p_size, img_size, step.x, odata, shared);
}
/**
* @brief Computes forward wavelet transform 53.
*
* @param idata Input data.
* @param odata Output data
* @param img_size Struct with input image width and height.
* @param step Struct with output image width and height.
*/
__global__
void fwt53_new(const float *idata, float *odata, const int2 img_size, const int2 step)
{
/* Shared memory for part of the signal */
__shared__ int shared[MEMSIZE][MEMSIZE + 1];
/* Beginning x, y of the PATCH */
const int2 bid = make_int2(blockIdx.x * PATCHX, blockIdx.y * PATCHY);
/* Thread id */
short2 tid = make_short2(threadIdx.x, threadIdx.y);
/* Threads offset to read margins */
short2 p_offset;
/* Compute patch size */
const short2 p_size = make_short2(img_size.x - bid.x < PATCHX ? img_size.x - bid.x : PATCHX, img_size.y - bid.y < PATCHY ? img_size.y
- bid.y : PATCHY);
/* Allocate registers in order to compute even and odd pixels. */
int pix_neighborhood[5];
// Minimize registers usage. Right | bottom offset. Odd | even result pixels.
int results[((MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY) * 2];
read_data_new<int>(tid, bid, p_size, img_size, step.x, idata, shared, results, OFFSET_53);
__syncthreads();
// Thread x id
tid.x = threadIdx.x;
// Thread y id
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Process rows
while (tid.y * 2 < p_size.y && tid.x < p_size.x + 2 * OFFSET_53)
{
#pragma unroll
for (int i = 0; i < 5; i++)
{
pix_neighborhood[i] = shared[tid.y * 2 + i - 2 + OFFSET_53][tid.x];
}
pix_neighborhood[1] -= ((pix_neighborhood[0] + pix_neighborhood[2]) >> 1);
pix_neighborhood[3] -= ((pix_neighborhood[2] + pix_neighborhood[4]) >> 1);
// Update 1
// process53<int, 2, 2> (1, 2, 2, pix_neighborhood);
pix_neighborhood[2] += ((pix_neighborhood[1] + pix_neighborhood[3] + 2) >> 2);
save_part_results_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 2, 3> (p_offset.y, results, pix_neighborhood);
p_offset.y++;
tid.x += BLOCKSIZEY;
}
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
p_offset.y = 0;
// Column offset
p_offset.x = (int) ceilf(p_size.y / 2.0f);
// save results and rotate
while (tid.y < p_size.y && tid.x < p_size.x + 2 * OFFSET_53)
{
// Can not dynamically index registers, avoid local memory usage.
// shared[tid.x][tid.y] = k2 * results[0 + p_offset.y * 2];
// if(tid.x + BLOCKSIZEX < p_size.y)
// shared[tid.x + BLOCKSIZEX][tid.y] = k1 * results[1 + p_offset.y * 2];
save_to_shared_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY> (1, tid, p_offset, p_size.y, results, shared);
p_offset.y++;
tid.x += BLOCKSIZEY;
}
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Process columns
while (tid.y < p_size.y && tid.x * 2 < p_size.x)
{
#pragma unroll
for (int i = 0; i < 5; i++)
{
pix_neighborhood[i] = shared[tid.y][tid.x * 2 + i - 2 + OFFSET_53];
}
pix_neighborhood[1] -= ((pix_neighborhood[0] + pix_neighborhood[2]) >> 1);
pix_neighborhood[3] -= ((pix_neighborhood[2] + pix_neighborhood[4]) >> 1);
pix_neighborhood[2] += ((pix_neighborhood[1] + pix_neighborhood[3] + 2) >> 2);
save_part_results_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 2, 3> (p_offset.y, results, pix_neighborhood);
p_offset.y++;
tid.y += BLOCKSIZEY;
}
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Row offset
p_offset.x = (int) ceilf(p_size.x / 2.0f);
// Save results and rotate
while (tid.y < p_size.y && tid.x < p_size.x)
{
// Can not dynamically index registers, avoid local memory usage.
// shared[tid.x][tid.y] = k2 * results[0 + p_offset.y * 2];
// if(tid.x + BLOCKSIZEX < p_size.y)
// shared[tid.x + BLOCKSIZEX][tid.y] = k1 * results[1 + p_offset.y * 2];
save_to_shared2_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY> (1, tid, p_offset, p_size.x, results, shared);
p_offset.y++;
tid.y += BLOCKSIZEY;
}
__syncthreads();
save_data_new<int>(tid, p_offset, p_size, img_size, step.x, odata, shared);
}
| 0fd6792a7086fad1d4b252b4d024dd607e3b0bcb.cu | /**
* @file fwt_new.cu
*
* @author Milosz Ciznicki
*/
/**
* @file fwt.cu
*
* @author Milosz Ciznicki
*/
extern "C" {
#include "fwt_new.h"
}
/**
* @defgroup 97Coeff 97 Coefficients.
*
* 97 Coefficients.
*
* @{
*/
const float a1 = -1.586134342f;
const float a2 = -0.05298011854f;
const float a3 = 0.8829110762f;
const float a4 = 0.4435068522f;
/** @} */
/**
* @defgroup ScaleCoeff Scale coefficients.
*
* Scale coefficients.
*
* @{
*/
const float k = 1.230174104914f; // 1.230174104914
/** @} */
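/*
 * Lifting ladder implemented below (9/7 analysis): predict with a1 on odd
 * samples, update with a2 on even samples, predict with a3, update with a4;
 * the low-pass output is then scaled by 1/k and the high-pass output by k.
 */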
/**
* @defgroup 53Coeff 53 coefficients.
*
* 53 coefficients.
*
* @{
*/
const float p53 = -0.5f;
const float u53 = 0.25f;
/** @} */
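/*
 * Integer 5/3 analysis as implemented in the kernels below (arithmetic >>
 * acts as a floor division):
 *   d[n] = x[2n+1] - ((x[2n] + x[2n+2]) >> 1)      predict
 *   s[n] = x[2n]   + ((d[n-1] + d[n] + 2) >> 2)    update
 */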
/**
* @brief Performs one lifting step (predict or update) over the pixel neighborhood.
*
* @param a Coefficient.
* @param pix_neighborhood Array storing neighbor pixels.
*/
template <class T, unsigned int start, unsigned int end>
__device__
void process_new(const float a, T *pix_neighborhood)
{
#pragma unroll
for(int i = start; i <= end; i+=2)
{
pix_neighborhood[i] += a * (pix_neighborhood[i-1] + pix_neighborhood[i+1]);
}
}
/**
* @brief Performs one integer lifting step over the pixel neighborhood.
*
* @param a Coefficient.
* @param pix_neighborhood Array storing neighbor pixels.
*/
template <class T, unsigned int start, unsigned int end>
__device__
void process53_new(const int sign, const int approx, const int a, T *pix_neighborhood)
{
#pragma unroll
for(int i = start; i <= end; i+=2)
{
pix_neighborhood[i] += sign * ((pix_neighborhood[i-1] + pix_neighborhood[i+1] + approx) >> a);
}
}
/**
* @brief Saves results to temporary array.
*
* @param p_offset_y Row number actually being processed
* @param results Array containing temporary results.
* @param pix_neighborhood Array storing neighbor pixels.
*/
template <class T, unsigned int n, int even, int odd>
__device__
void save_part_results_new(int p_offset_y, T *results, T *pix_neighborhood)
{
#pragma unroll
for(int i = 0; i < n; i++)
{
if(p_offset_y == i)
{
results[2*i] = pix_neighborhood[even]; // even - low-pass sample - a1->a2->a3->a4
results[2*i + 1] = pix_neighborhood[odd]; // odd - high-pass sample - a1->a2->a3
}
}
}
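/**
 * @brief Saves computed results to shared memory, deinterleaving along a row.
 *
 * Row-oriented counterpart of save_to_shared_new: the low-pass sample (scaled
 * by 1/k) is written at column tid.x and the high-pass sample (scaled by k) at
 * column tid.x + p_offset.x of the same row.
 *
 * @param k Scale coefficient.
 * @param tid Thread id.
 * @param p_offset Offset in shared memory.
 * @param p_size_x Computed block width.
 * @param results Array containing computed results.
 * @param shared Shared memory.
 */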
template <class T, unsigned int n>
__device__
void save_to_shared2_new(float k, short2 tid, short2 p_offset, int p_size_x, T *results, T shared[][MEMSIZE + 1])
{
#pragma unroll
for(int i = 0; i < n; i++)
{
if(p_offset.y == i)
{
shared[tid.y][tid.x] = results[2*i] / k;
if(tid.x + p_offset.x < p_size_x)// p_size_y
shared[tid.y][tid.x + p_offset.x] = k * results[2*i + 1];
}
}
}
/**
* @brief Saves computed results to shared memory.
*
* @param k Scale coefficient.
* @param tid Thread id.
* @param p_offset Offset in shared memory.
* @param p_size_x Computed block width.
* @param results Array containing computed results.
* @param shared Shared memory.
*/
template <class T, unsigned int n>
__device__
void save_to_shared_new(float k, short2 tid, short2 p_offset, int p_size_x, T *results, T shared[][MEMSIZE + 1])
{
#pragma unroll
for(int i = 0; i < n; i++)
{
if(p_offset.y == i)
{
shared[tid.y][tid.x] = results[2*i] / k;
if(tid.y + p_offset.x < p_size_x)// p_size_y
shared[tid.y + p_offset.x][tid.x] = k * results[2*i + 1];
}
}
}
/**
* @brief Computes forward 97 lifting process and saves results to shared memory.
*
* @param tidy Thread y id.
* @param tidx2 Even thread x id.
* @param p_offset_y Row number actually being processed.
* @param pix_neighborhood Array storing neighbor pixels.
* @param shared Shared memory.
* @param results Array containing computed results.
*/
__device__
void fprocess_97_new(short tidy, const short tidx2, short p_offset_y, float *pix_neighborhood, const float shared[][MEMSIZE + 1],
float *results)
{
// Read necessary data
#pragma unroll
for (int i = 0; i < 9; i++)
{// pragma unroll
pix_neighborhood[i] = shared[tidy][tidx2 + i - 4 + OFFSET_97];
}
// Predict 1
process_new<float, 1, 7> (a1, pix_neighborhood);
// Update 1
process_new<float, 2, 6> (a2, pix_neighborhood);
// Predict 2
process_new<float, 3, 5> (a3, pix_neighborhood);
// Update 2
process_new<float, 4, 4> (a4, pix_neighborhood);
// Can not dynamically index registers, avoid local memory usage.
// results[0 + p_offset_y * 2] = pix_neighborhood[4];
// results[1 + p_offset_y * 2] = pix_neighborhood[5];
save_part_results_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 4, 5> (p_offset_y, results, pix_neighborhood);
}
/**
* @brief Computes forward 53 lifting process and saves results to shared memory.
*
* @param tidy Thread y id.
* @param tidx2 Even thread x id.
* @param p_offset_y Row number actually being processed.
* @param pix_neighborhood Array storing neighbor pixels.
* @param shared Shared memory.
* @param results Array containing computed results.
*/
__device__
void fprocess_53_2_new(short tidy, const short tidx2, short p_offset_y, int *pix_neighborhood, const int shared[][MEMSIZE + 1],
int *results)
{
// Read necessary data
#pragma unroll
for (int i = 0; i < 5; i++)
{
pix_neighborhood[i] = shared[tidy][tidx2 + i - 2 + OFFSET_53];
}
// Predict 1
// process53<int, 1, 3> (-1, 0, 1, pix_neighborhood);
pix_neighborhood[1] -= ((pix_neighborhood[0] + pix_neighborhood[2]) >> 1);
pix_neighborhood[3] -= ((pix_neighborhood[2] + pix_neighborhood[4]) >> 1);
// Update 1
// process53<int, 2, 2> (1, 2, 2, pix_neighborhood);
pix_neighborhood[2] += ((pix_neighborhood[1] + pix_neighborhood[3] + 2) >> 2);
// Can not dynamically index registers, avoid local memory usage.
// results[0 + p_offset_y * 2] = pix_neighborhood[4];
// results[1 + p_offset_y * 2] = pix_neighborhood[5];
save_part_results_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 2, 3> (p_offset_y, results, pix_neighborhood);
}
/**
* @brief Computes forward 53 lifting process and saves results to shared memory.
*
* @param tidy Thread y id.
* @param tidx2 Even thread x id.
* @param p_offset_y Row number actually being processed.
* @param pix_neighborhood Array storing neighbor pixels.
* @param shared Shared memory.
* @param results Array containing computed results.
*/
__device__
void fprocess_53_new(short tidy, const short tidx2, short p_offset_y, int *pix_neighborhood, const int shared[][MEMSIZE + 1],
int *results)
{
// Read necessary data
#pragma unroll
for (int i = 0; i < 5; i++)
{
pix_neighborhood[i] = shared[tidy][tidx2 + i - 2 + OFFSET_53];
}
// Predict 1
process_new<int, 1, 3> (p53, pix_neighborhood);
// Update 1
process_new<int, 2, 2> (u53, pix_neighborhood);
// Can not dynamically index registers, avoid local memory usage.
// results[0 + p_offset_y * 2] = pix_neighborhood[4];
// results[1 + p_offset_y * 2] = pix_neighborhood[5];
save_part_results_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 2, 3> (p_offset_y, results, pix_neighborhood);
}
/**
* @brief Reads data from global memory to shared memory.
*
* Data from global memory is read with an additional margin containing neighboring pixels. The margin width depends on the offset variable.
*
* @param tid Thread id.
* @param bid Block id.
* @param p_size Block width and height.
* @param img_size Input image width and height.
* @param step_x Output image width.
* @param idata Input array.
* @param shared Shared memory
* @param results Temporary array.
* @param offset Margin width.
*/
template<class T>
__device__
void read_data_new(short2 tid, const int2 bid, const short2 p_size, const int2 img_size, const int step_x, const float *idata,
T shared[][MEMSIZE + 1], T *results, const int offset)
{
// Threads offset to read margins
short2 p_offset;
// Left and top offset
// If first block in row, compute left offset to symmetric extension.
const short p_l_offset_x =
((bid.x == FIRST_BLOCK) ? (offset - tid.x) /* left symmetric extension*/: -offset + tid.x /* take from previous block */);
// If first block in column, compute top offset to symmetric extension.
const short p_t_offset_y =
((bid.y == FIRST_BLOCK) ? (offset - tid.y) /* top symmetric extension*/: -offset + tid.y /* take from previous block */);
// Read patch from GM to SM
while (tid.y < p_size.y + 2 * offset)
{
while (tid.x < p_size.x + 2 * offset)
{
// First offset threads do symmetric extension.
p_offset.x = ((tid.x < offset) ? p_l_offset_x /* take from left adjacent block */: -offset + tid.x /* take normally pixels */);
p_offset.y = ((tid.y < offset) ? p_t_offset_y /* take from top adjacent block */: -offset + tid.y /* take normally pixels */);
// Take as many pixels as it is possible from right side
results[2] = ((tid.x - offset < img_size.x - bid.x) ? (tid.x - offset) /* Take pixels from next block */: ((img_size.x - bid.x
+ (img_size.x - bid.x - p_size.x) - 2) - (tid.x - (p_size.x + offset)))) /* Take missing pixel by doing symmetric extensions */;
// If there are less than offset pixels on bottom side
results[3] = ((tid.y - offset < img_size.y - bid.y) ? (tid.y - offset) /* Take pixels from next block */: ((img_size.y - bid.y
+ (img_size.y - bid.y - p_size.y) - 2) - (tid.y - (p_size.y + offset)))) /* Take missing pixel by doing symmetric extensions */;
// // If next to last block in row, compute right offset to symmetric extension.
// results[2] = ((img_size.x - (bid.x + PATCHX) < offset) ? ((img_size.x - bid.x - 2) - (tid.x - (p_size.x + offset))) /* Take missing pixel by doing symmetric extensions */ : tid.x - offset /* Take pixels from next block */);
// // If next to last block in column, compute bottom offset to symmetric extension.
// results[3] = ((img_size.y - (bid.y + PATCHY) < offset) ? ((img_size.y - bid.y - 2) - (tid.y - (p_size.y + offset))) /* Take missing pixel by doing symmetric extensions */ : tid.y - offset /* Take pixels from next block */);
// // If next to last block in row, compute right offset to symmetric extension.
// results[2] = ((img_size.x - (bid.x + PATCHX) < offset) ? ((img_size.x - bid.x - 1) - (tid.x - (p_size.x + offset))) : tid.x - offset /* Take pixels from next block */);
// // If next to last block in column, compute bottom offset to symmetric extension.
// results[3] = ((img_size.y - (bid.y + PATCHY) < offset) ? ((img_size.y - bid.y - 1) - (tid.y - (p_size.y + offset))) : tid.y - offset /* Take pixels from next block */);
// If next to last block in row, compute right offset to symmetric extension.
results[2] = ((img_size.x - (bid.x + PATCHX) < offset) ? results[2] : tid.x - offset /* Take pixels from next block */);
// If next to last block in column, compute bottom offset to symmetric extension.
results[3] = ((img_size.y - (bid.y + PATCHY) < offset) ? results[3] : tid.y - offset /* Take pixels from next block */);
// // If next to last block in row, compute right offset to symmetric extension.
// results[2] = ((img_size.x - bid.x < PATCHX + 4) ? results[2] : tid.x - offset /* Take pixels from next block */);
// // If next to last block in column, compute bottom offset to symmetric extension.
// results[3] = ((img_size.y - bid.y < PATCHY + 4) ? results[3] : tid.y - offset /* Take pixels from next block */);
// If last block in row, compute right offset to symmetric extension.
results[2] = ((img_size.x - bid.x < PATCHX + 1) ? ((img_size.x - bid.x - 2) - (tid.x - (p_size.x + offset))) /* Symmetric extension 0 1 2 3 | 2 1 0 */
: results[2]);
// If last block in column, compute bottom offset to symmetric extension.
results[3] = ((img_size.y - bid.y < PATCHY + 1) ? ((img_size.y - bid.y - 2) - (tid.y - (p_size.y + offset))) /* Symmetric extension 0 1 2 3 | 2 1 0 */
: results[3]);
// Last threads do symmetric extension.
p_offset.x = ((tid.x >= p_size.x + offset) ? results[2] : p_offset.x);
p_offset.y = ((tid.y >= p_size.y + offset) ? results[3] : p_offset.y);
shared[tid.y][tid.x] = idata[bid.x + p_offset.x + (bid.y + p_offset.y) * step_x];
tid.x += BLOCKSIZEX;
}
tid.x = threadIdx.x;
tid.y += BLOCKSIZEY;
}
}
/**
* @brief Saves data from shared memory to global memory.
*
* @param tid Thread id.
* @param p_offset X and y offset in shared memory.
* @param p_size Width and height of the computed block.
* @param img_size Image width and height.
* @param step_x Output image width.
* @param odata Output array.
* @param shared Shared memory.
*/
template<class T>
__device__
void save_data_new(short2 tid, short2 p_offset, const short2 p_size, const int2 img_size, const int step_x, float *odata,
T shared[][MEMSIZE + 1])
{
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Column offset
p_offset.x = (int) ceilf(p_size.x / 2.0f);
// Row offset
p_offset.y = (int) ceilf(p_size.y / 2.0f);
// Save to GM
while (tid.y < p_offset.y)
{
//if(tid.x + blockIdx.x * BLOCKSIZEX < img_size.x && tid.y + blockIdx.y * BLOCKSIZEY < img_size.y)
while (tid.x < p_offset.x)
{
odata[tid.x + blockIdx.x * PATCHX_DIV_2 + (tid.y + blockIdx.y * PATCHY_DIV_2) * step_x] = shared[tid.y][tid.x];
if (tid.x + (int) ceilf(img_size.x / 2.0f) + blockIdx.x * PATCHX_DIV_2 < img_size.x)
odata[tid.x + (int) ceilf(img_size.x / 2.0f) + blockIdx.x * PATCHX_DIV_2 + (tid.y + blockIdx.y * PATCHY_DIV_2) * step_x]
= shared[tid.y][tid.x + p_offset.x];
if (tid.y + (int) ceilf(img_size.y / 2.0f) + blockIdx.y * PATCHY_DIV_2 < img_size.y)
odata[tid.x + blockIdx.x * PATCHX_DIV_2 + (tid.y + (int) ceilf(img_size.y / 2.0f) + blockIdx.y * PATCHY_DIV_2) * step_x]
= shared[tid.y + p_offset.y][tid.x];
if (tid.x + (int) ceilf(img_size.x / 2.0f) + blockIdx.x * PATCHX_DIV_2 < img_size.x && tid.y + (int) ceilf(img_size.y / 2.0f)
+ blockIdx.y * PATCHY_DIV_2 < img_size.y)
odata[tid.x + (int) ceilf(img_size.x / 2.0f) + blockIdx.x * PATCHX_DIV_2 + (tid.y + (int) ceilf(img_size.y / 2.0f)
+ blockIdx.y * PATCHY_DIV_2) * step_x] = shared[tid.y + p_offset.y][tid.x + p_offset.x];
tid.x += BLOCKSIZEX;
}
tid.x = threadIdx.x;
tid.y += BLOCKSIZEY;
}
}
/**
* @brief Computes forward wavelet transform 97.
*
* @param idata Input data.
* @param odata Output data
* @param img_size Struct with input image width and height.
* @param step Struct with output image width and height.
*/
__global__
void fwt97_new(const float *idata, float *odata, const int2 img_size, const int2 step)
{
/* Shared memory for part of the signal */
__shared__ float shared[MEMSIZE][MEMSIZE + 1];
/* Input x, y block dimension */
const int2 bid = make_int2(blockIdx.x * PATCHX, blockIdx.y * PATCHY);
/* Thread id */
short2 tid = make_short2(threadIdx.x, threadIdx.y);
/* Threads offset to read margins */
short2 p_offset;
    /* Compute patch size, clamped at the image border */
const short2 p_size = make_short2(img_size.x - bid.x < PATCHX ? img_size.x - bid.x : PATCHX, img_size.y - bid.y < PATCHY ? img_size.y
- bid.y : PATCHY);
/* Even thread id */
// const short tidx2 = threadIdx.x * 2;
/* Allocate registers in order to compute even and odd pixels. */
float pix_neighborhood[9];
    /* Minimize register usage. Right | bottom offset. Odd | even result pixels. */
float results[((MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY) * 2];
read_data_new<float>(tid, bid, p_size, img_size, step.x, idata, shared, results, OFFSET_97);
__syncthreads();
// Thread x id
tid.x = threadIdx.x;
// Thread y id
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Process rows
while (tid.y * 2 < p_size.y && tid.x < p_size.x + 2 * OFFSET_97)
{
// fprocess_97(tid.y, tidx2, p_offset.y, pix_neighborhood, shared, results);
// Read necessary data
#pragma unroll
for (int i = 0; i < 9; i++)
        {
pix_neighborhood[i] = shared[tid.y * 2 + i - 4 + OFFSET_97][tid.x];
}
// Predict 1
process_new<float, 1, 7> (a1, pix_neighborhood);
// Update 1
process_new<float, 2, 6> (a2, pix_neighborhood);
// Predict 2
process_new<float, 3, 5> (a3, pix_neighborhood);
// Update 2
process_new<float, 4, 4> (a4, pix_neighborhood);
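        // The four process_new calls above implement the CDF 9/7 lifting
        // factorization (assuming a1..a4 hold the usual constants, roughly
        // a1=-1.5861, a2=-0.0530, a3=0.8829, a4=0.4435):
        //   odd  += a1*(left even + right even)   predict 1
        //   even += a2*(left odd  + right odd)    update 1
        //   odd  += a3*(left even + right even)   predict 2
        //   even += a4*(left odd  + right odd)    update 2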
        // Cannot dynamically index registers; avoid local memory usage.
// results[0 + p_offset_y * 2] = pix_neighborhood[4];
// results[1 + p_offset_y * 2] = pix_neighborhood[5];
save_part_results_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 4, 5> (p_offset.y, results, pix_neighborhood);
p_offset.y++;
tid.x += BLOCKSIZEY;
}
// // Process rows
// while (tid.y < p_size.y + 2 * OFFSET_97 && tidx2 < p_size.x)
// {
// fprocess_97(tid.y, tidx2, p_offset.y, pix_neighborhood, shared, results);
//
// p_offset.y++;
// tid.y += BLOCKSIZEY;
// }
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
p_offset.y = 0;
// Column offset
p_offset.x = (int) ceilf(p_size.y / 2.0f);
    // Save results and rotate
while (tid.y < p_size.y && tid.x < p_size.x + 2 * OFFSET_97)
{
        // Cannot dynamically index registers; avoid local memory usage.
// shared[tid.x][tid.y] = k2 * results[0 + p_offset.y * 2];
// if(tid.x + BLOCKSIZEX < p_size.y)
// shared[tid.x + BLOCKSIZEX][tid.y] = k1 * results[1 + p_offset.y * 2];
save_to_shared_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY> (k, tid, p_offset, p_size.y, results, shared);
p_offset.y++;
tid.x += BLOCKSIZEY;
}
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Process columns
while (tid.y < p_size.y && tid.x * 2 < p_size.x)
{
// fprocess_97(tid.y, tidx2, p_offset.y, pix_neighborhood, shared, results);
// Read necessary data
#pragma unroll
for (int i = 0; i < 9; i++)
        {
pix_neighborhood[i] = shared[tid.y][tid.x * 2 + i - 4 + OFFSET_97];
}
// Predict 1
process_new<float, 1, 7> (a1, pix_neighborhood);
// Update 1
process_new<float, 2, 6> (a2, pix_neighborhood);
// Predict 2
process_new<float, 3, 5> (a3, pix_neighborhood);
// Update 2
process_new<float, 4, 4> (a4, pix_neighborhood);
save_part_results_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 4, 5> (p_offset.y, results, pix_neighborhood);
p_offset.y++;
tid.y += BLOCKSIZEY;
}
// // Process columns
// while (tid.y < p_size.x && tidx2 < p_size.y)
// {
// fprocess_97(tid.y, tidx2, p_offset.y, pix_neighborhood, shared, results);
//
// p_offset.y++;
// tid.y += BLOCKSIZEY;
// }
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Row offset
p_offset.x = (int) ceilf(p_size.x / 2.0f);
    // Save results and rotate
while (tid.y < p_size.y && tid.x < p_size.x)
{
        // Cannot dynamically index registers; avoid local memory usage.
// shared[tid.x][tid.y] = k2 * results[0 + p_offset.y * 2];
// if(tid.x + BLOCKSIZEX < p_size.y)
// shared[tid.x + BLOCKSIZEX][tid.y] = k1 * results[1 + p_offset.y * 2];
save_to_shared2_new<float, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY> (k, tid, p_offset, p_size.x, results, shared);
p_offset.y++;
tid.y += BLOCKSIZEY;
}
__syncthreads();
save_data_new<float>(tid, p_offset, p_size, img_size, step.x, odata, shared);
}
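/*
 * A minimal host-side launch sketch (hypothetical driver code: PATCHX,
 * PATCHY, BLOCKSIZEX and BLOCKSIZEY are assumed to come from the build
 * configuration, and d_in/d_out to be device buffers of step.x * step.y
 * floats):
 *
 *   dim3 block(BLOCKSIZEX, BLOCKSIZEY);
 *   dim3 grid((img_size.x + PATCHX - 1) / PATCHX,
 *             (img_size.y + PATCHY - 1) / PATCHY);
 *   fwt97_new<<<grid, block>>>(d_in, d_out, img_size, step);
 */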
/**
* @brief Computes forward wavelet transform 53.
*
* @param idata Input data.
 * @param odata     Output data.
* @param img_size Struct with input image width and height.
* @param step Struct with output image width and height.
*/
__global__
void fwt53_new(const float *idata, float *odata, const int2 img_size, const int2 step)
{
/* Shared memory for part of the signal */
__shared__ int shared[MEMSIZE][MEMSIZE + 1];
    /* Beginning x, y of the PATCH */
const int2 bid = make_int2(blockIdx.x * PATCHX, blockIdx.y * PATCHY);
/* Thread id */
short2 tid = make_short2(threadIdx.x, threadIdx.y);
/* Threads offset to read margins */
short2 p_offset;
/* Compute patch size */
const short2 p_size = make_short2(img_size.x - bid.x < PATCHX ? img_size.x - bid.x : PATCHX, img_size.y - bid.y < PATCHY ? img_size.y
- bid.y : PATCHY);
/* Allocate registers in order to compute even and odd pixels. */
int pix_neighborhood[5];
    // Minimize register usage. Right | bottom offset. Odd | even result pixels.
int results[((MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY) * 2];
read_data_new<int>(tid, bid, p_size, img_size, step.x, idata, shared, results, OFFSET_53);
__syncthreads();
// Thread x id
tid.x = threadIdx.x;
// Thread y id
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Process rows
while (tid.y * 2 < p_size.y && tid.x < p_size.x + 2 * OFFSET_53)
{
#pragma unroll
for (int i = 0; i < 5; i++)
{
pix_neighborhood[i] = shared[tid.y * 2 + i - 2 + OFFSET_53][tid.x];
}
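        // Reversible integer 5/3 lifting (the shifts implement floor division):
        //   d[n] = x[2n+1] - floor((x[2n] + x[2n+2]) / 2)      predict
        //   s[n] = x[2n]   + floor((d[n-1] + d[n] + 2) / 4)    update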
pix_neighborhood[1] -= ((pix_neighborhood[0] + pix_neighborhood[2]) >> 1);
pix_neighborhood[3] -= ((pix_neighborhood[2] + pix_neighborhood[4]) >> 1);
// Update 1
// process53<int, 2, 2> (1, 2, 2, pix_neighborhood);
pix_neighborhood[2] += ((pix_neighborhood[1] + pix_neighborhood[3] + 2) >> 2);
save_part_results_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 2, 3> (p_offset.y, results, pix_neighborhood);
p_offset.y++;
tid.x += BLOCKSIZEY;
}
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
p_offset.y = 0;
// Column offset
p_offset.x = (int) ceilf(p_size.y / 2.0f);
    // Save results and rotate
while (tid.y < p_size.y && tid.x < p_size.x + 2 * OFFSET_53)
{
        // Cannot dynamically index registers; avoid local memory usage.
// shared[tid.x][tid.y] = k2 * results[0 + p_offset.y * 2];
// if(tid.x + BLOCKSIZEX < p_size.y)
// shared[tid.x + BLOCKSIZEX][tid.y] = k1 * results[1 + p_offset.y * 2];
save_to_shared_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY> (1, tid, p_offset, p_size.y, results, shared);
p_offset.y++;
tid.x += BLOCKSIZEY;
}
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Process columns
while (tid.y < p_size.y && tid.x * 2 < p_size.x)
{
#pragma unroll
for (int i = 0; i < 5; i++)
{
pix_neighborhood[i] = shared[tid.y][tid.x * 2 + i - 2 + OFFSET_53];
}
pix_neighborhood[1] -= ((pix_neighborhood[0] + pix_neighborhood[2]) >> 1);
pix_neighborhood[3] -= ((pix_neighborhood[2] + pix_neighborhood[4]) >> 1);
pix_neighborhood[2] += ((pix_neighborhood[1] + pix_neighborhood[3] + 2) >> 2);
save_part_results_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY, 2, 3> (p_offset.y, results, pix_neighborhood);
p_offset.y++;
tid.y += BLOCKSIZEY;
}
__syncthreads();
tid.x = threadIdx.x;
tid.y = threadIdx.y;
// Row number
p_offset.y = 0;
// Row offset
p_offset.x = (int) ceilf(p_size.x / 2.0f);
    // Save results and rotate
while (tid.y < p_size.y && tid.x < p_size.x)
{
        // Cannot dynamically index registers; avoid local memory usage.
// shared[tid.x][tid.y] = k2 * results[0 + p_offset.y * 2];
// if(tid.x + BLOCKSIZEX < p_size.y)
// shared[tid.x + BLOCKSIZEX][tid.y] = k1 * results[1 + p_offset.y * 2];
save_to_shared2_new<int, (MEMSIZE + (BLOCKSIZEY - 1)) / BLOCKSIZEY> (1, tid, p_offset, p_size.x, results, shared);
p_offset.y++;
tid.y += BLOCKSIZEY;
}
__syncthreads();
save_data_new<int>(tid, p_offset, p_size, img_size, step.x, odata, shared);
}
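/*
 * fwt53_new mirrors fwt97_new but stays in integers throughout (note the
 * int shared array and the shift-based lifting), which keeps the 5/3
 * transform exactly reversible for lossless coding; no scaling step is
 * needed, hence the constant 1 passed to the save_to_shared helpers where
 * fwt97_new passes k.
 */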
|
208e81dbebad3975e734fb6b2a66611652b8cb60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "linalg/linalg_internal_gpu/cuMatmul_internal.hpp"
#include "cytnx_error.hpp"
#include "Type.hpp"
#include "utils/lapack_wrapper.h"
namespace cytnx{
namespace linalg_internal{
template<typename UniType>
__global__ void cuMatMul_kernel(UniType *out, const UniType *inl, const UniType *inr, cytnx_int32 Ml, cytnx_int32 Comm, cytnx_int32 Nr){
UniType tmp=0;
cytnx_uint64 sid = blockIdx.x*blockDim.x + threadIdx.x;
if(sid < cytnx_uint64(Ml)*Nr){
for(cytnx_int32 c=0;c<Comm;c++){
tmp += inl[(sid/Nr)*Comm+c]*inr[c*Nr+sid%Nr];
}
                out[sid] = tmp; // row-major Ml x Nr output: sid encodes (row, col) as row*Nr + col
}
}
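        // Naive one-thread-per-output-element GEMM, used below as a fallback
        // for the integer/bool dtypes that the BLAS path does not cover: each
        // thread walks the shared dimension Comm, so it is correct but not
        // tuned for speed.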
/// cuMatmul
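        /*
         * Note on the gemm calls below: cublas/hipblas assume column-major
         * storage, so the wrappers pass the operands swapped (inr first,
         * then inl) with leading dimensions Nr and Comm. That makes gemm
         * compute C^T = B^T * A^T in column-major, which is exactly the
         * row-major C = A * B the callers expect; no explicit transposes
         * are needed.
         */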
void cuMatmul_internal_cd(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
// create handles:
hipblasHandle_t cublasH = NULL;
checkCudaErrors(hipblasCreate(&cublasH));
cytnx_complex128 alpha = cytnx_complex128(1,0), beta=cytnx_complex128(0,0);
hipDoubleComplex* _out = (hipDoubleComplex*)out->Mem;
hipDoubleComplex* _inl = (hipDoubleComplex*)inl->Mem;
hipDoubleComplex* _inr = (hipDoubleComplex*)inr->Mem;
            // gemm: row-major C = A*B (see the column-major note above)
checkCudaErrors(hipblasZgemm(cublasH,HIPBLAS_OP_N,HIPBLAS_OP_N,Nr,Ml,Comm,(hipDoubleComplex*)&alpha,_inr,Nr,_inl,Comm,(hipDoubleComplex*)&beta,_out,Nr));
hipblasDestroy(cublasH);
}
void cuMatmul_internal_cf(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
// create handles:
hipblasHandle_t cublasH = NULL;
checkCudaErrors(hipblasCreate(&cublasH));
cytnx_complex64 alpha = cytnx_complex64(1,0), beta=cytnx_complex64(0,0);
cuFloatComplex* _out = (cuFloatComplex*)out->Mem;
cuFloatComplex* _inl = (cuFloatComplex*)inl->Mem;
cuFloatComplex* _inr = (cuFloatComplex*)inr->Mem;
            // gemm: row-major C = A*B (see the column-major note above)
checkCudaErrors(hipblasCgemm(cublasH,HIPBLAS_OP_N,HIPBLAS_OP_N,Nr,Ml,Comm,(cuFloatComplex*)&alpha,_inr,Nr,_inl,Comm,(cuFloatComplex*)&beta,_out,Nr));
hipblasDestroy(cublasH);
}
void cuMatmul_internal_d(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
// create handles:
hipblasHandle_t cublasH = NULL;
checkCudaErrors(hipblasCreate(&cublasH));
cytnx_double alpha = 1, beta=0;
cytnx_double* _out = (cytnx_double*)out->Mem;
cytnx_double* _inl = (cytnx_double*)inl->Mem;
cytnx_double* _inr = (cytnx_double*)inr->Mem;
            // gemm: row-major C = A*B (see the column-major note above)
checkCudaErrors(hipblasDgemm(cublasH,HIPBLAS_OP_N,HIPBLAS_OP_N,Nr,Ml,Comm,&alpha,_inr,Nr,_inl,Comm,&beta,_out,Nr));
hipblasDestroy(cublasH);
}
void cuMatmul_internal_f(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
// create handles:
hipblasHandle_t cublasH = NULL;
checkCudaErrors(hipblasCreate(&cublasH));
cytnx_float alpha = 1, beta=0;
cytnx_float* _out = (cytnx_float*)out->Mem;
cytnx_float* _inl = (cytnx_float*)inl->Mem;
cytnx_float* _inr = (cytnx_float*)inr->Mem;
            // gemm: row-major C = A*B (see the column-major note above)
checkCudaErrors(hipblasSgemm(cublasH,HIPBLAS_OP_N,HIPBLAS_OP_N,Nr,Ml,Comm,&alpha,_inr,Nr,_inl,Comm,&beta,_out,Nr));
hipblasDestroy(cublasH);
}
void cuMatmul_internal_i64(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_int64* _out = (cytnx_int64*)out->Mem;
cytnx_int64* _inl = (cytnx_int64*)inl->Mem;
cytnx_int64* _inr = (cytnx_int64*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
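            // i.e. Nblocks = ceil(Ml*Nr / 512): one 512-thread block per 512
            // output elements.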
hipLaunchKernelGGL(( cuMatMul_kernel), dim3(Nblocks),dim3(512), 0, 0, _out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_u64(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_uint64* _out = (cytnx_uint64*)out->Mem;
cytnx_uint64* _inl = (cytnx_uint64*)inl->Mem;
cytnx_uint64* _inr = (cytnx_uint64*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
hipLaunchKernelGGL(( cuMatMul_kernel), dim3(Nblocks),dim3(512), 0, 0, _out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_i32(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_int32* _out = (cytnx_int32*)out->Mem;
cytnx_int32* _inl = (cytnx_int32*)inl->Mem;
cytnx_int32* _inr = (cytnx_int32*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
hipLaunchKernelGGL(( cuMatMul_kernel), dim3(Nblocks),dim3(512), 0, 0, _out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_u32(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_uint32* _out = (cytnx_uint32*)out->Mem;
cytnx_uint32* _inl = (cytnx_uint32*)inl->Mem;
cytnx_uint32* _inr = (cytnx_uint32*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
hipLaunchKernelGGL(( cuMatMul_kernel), dim3(Nblocks),dim3(512), 0, 0, _out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_i16(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_int16* _out = (cytnx_int16*)out->Mem;
cytnx_int16* _inl = (cytnx_int16*)inl->Mem;
cytnx_int16* _inr = (cytnx_int16*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
hipLaunchKernelGGL(( cuMatMul_kernel), dim3(Nblocks),dim3(512), 0, 0, _out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_u16(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_uint16* _out = (cytnx_uint16*)out->Mem;
cytnx_uint16* _inl = (cytnx_uint16*)inl->Mem;
cytnx_uint16* _inr = (cytnx_uint16*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
hipLaunchKernelGGL(( cuMatMul_kernel), dim3(Nblocks),dim3(512), 0, 0, _out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_b(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_bool* _out = (cytnx_bool*)out->Mem;
cytnx_bool* _inl = (cytnx_bool*)inl->Mem;
cytnx_bool* _inr = (cytnx_bool*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
hipLaunchKernelGGL(( cuMatMul_kernel), dim3(Nblocks),dim3(512), 0, 0, _out,_inl,_inr,Ml,Comm,Nr);
}
}
}
| 208e81dbebad3975e734fb6b2a66611652b8cb60.cu | #include "linalg/linalg_internal_gpu/cuMatmul_internal.hpp"
#include "cytnx_error.hpp"
#include "Type.hpp"
#include "utils/lapack_wrapper.h"
namespace cytnx{
namespace linalg_internal{
template<typename UniType>
__global__ void cuMatMul_kernel(UniType *out, const UniType *inl, const UniType *inr, cytnx_int32 Ml, cytnx_int32 Comm, cytnx_int32 Nr){
UniType tmp=0;
cytnx_uint64 sid = blockIdx.x*blockDim.x + threadIdx.x;
if(sid < cytnx_uint64(Ml)*Nr){
for(cytnx_int32 c=0;c<Comm;c++){
tmp += inl[(sid/Nr)*Comm+c]*inr[c*Nr+sid%Nr];
}
                out[sid] = tmp; // row-major Ml x Nr output: sid encodes (row, col) as row*Nr + col
}
}
/// cuMatmul
void cuMatmul_internal_cd(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
// create handles:
cublasHandle_t cublasH = NULL;
checkCudaErrors(cublasCreate(&cublasH));
cytnx_complex128 alpha = cytnx_complex128(1,0), beta=cytnx_complex128(0,0);
cuDoubleComplex* _out = (cuDoubleComplex*)out->Mem;
cuDoubleComplex* _inl = (cuDoubleComplex*)inl->Mem;
cuDoubleComplex* _inr = (cuDoubleComplex*)inr->Mem;
            // gemm: row-major C = A*B, computed as column-major C^T = B^T * A^T
checkCudaErrors(cublasZgemm(cublasH,CUBLAS_OP_N,CUBLAS_OP_N,Nr,Ml,Comm,(cuDoubleComplex*)&alpha,_inr,Nr,_inl,Comm,(cuDoubleComplex*)&beta,_out,Nr));
cublasDestroy(cublasH);
}
void cuMatmul_internal_cf(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
// create handles:
cublasHandle_t cublasH = NULL;
checkCudaErrors(cublasCreate(&cublasH));
cytnx_complex64 alpha = cytnx_complex64(1,0), beta=cytnx_complex64(0,0);
cuFloatComplex* _out = (cuFloatComplex*)out->Mem;
cuFloatComplex* _inl = (cuFloatComplex*)inl->Mem;
cuFloatComplex* _inr = (cuFloatComplex*)inr->Mem;
            // gemm: row-major C = A*B, computed as column-major C^T = B^T * A^T
checkCudaErrors(cublasCgemm(cublasH,CUBLAS_OP_N,CUBLAS_OP_N,Nr,Ml,Comm,(cuFloatComplex*)&alpha,_inr,Nr,_inl,Comm,(cuFloatComplex*)&beta,_out,Nr));
cublasDestroy(cublasH);
}
void cuMatmul_internal_d(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
// create handles:
cublasHandle_t cublasH = NULL;
checkCudaErrors(cublasCreate(&cublasH));
cytnx_double alpha = 1, beta=0;
cytnx_double* _out = (cytnx_double*)out->Mem;
cytnx_double* _inl = (cytnx_double*)inl->Mem;
cytnx_double* _inr = (cytnx_double*)inr->Mem;
            // gemm: row-major C = A*B, computed as column-major C^T = B^T * A^T
checkCudaErrors(cublasDgemm(cublasH,CUBLAS_OP_N,CUBLAS_OP_N,Nr,Ml,Comm,&alpha,_inr,Nr,_inl,Comm,&beta,_out,Nr));
cublasDestroy(cublasH);
}
void cuMatmul_internal_f(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
// create handles:
cublasHandle_t cublasH = NULL;
checkCudaErrors(cublasCreate(&cublasH));
cytnx_float alpha = 1, beta=0;
cytnx_float* _out = (cytnx_float*)out->Mem;
cytnx_float* _inl = (cytnx_float*)inl->Mem;
cytnx_float* _inr = (cytnx_float*)inr->Mem;
            // gemm: row-major C = A*B, computed as column-major C^T = B^T * A^T
checkCudaErrors(cublasSgemm(cublasH,CUBLAS_OP_N,CUBLAS_OP_N,Nr,Ml,Comm,&alpha,_inr,Nr,_inl,Comm,&beta,_out,Nr));
cublasDestroy(cublasH);
}
void cuMatmul_internal_i64(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_int64* _out = (cytnx_int64*)out->Mem;
cytnx_int64* _inl = (cytnx_int64*)inl->Mem;
cytnx_int64* _inr = (cytnx_int64*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
cuMatMul_kernel<<<Nblocks,512>>>(_out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_u64(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_uint64* _out = (cytnx_uint64*)out->Mem;
cytnx_uint64* _inl = (cytnx_uint64*)inl->Mem;
cytnx_uint64* _inr = (cytnx_uint64*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
cuMatMul_kernel<<<Nblocks,512>>>(_out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_i32(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_int32* _out = (cytnx_int32*)out->Mem;
cytnx_int32* _inl = (cytnx_int32*)inl->Mem;
cytnx_int32* _inr = (cytnx_int32*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
cuMatMul_kernel<<<Nblocks,512>>>(_out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_u32(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_uint32* _out = (cytnx_uint32*)out->Mem;
cytnx_uint32* _inl = (cytnx_uint32*)inl->Mem;
cytnx_uint32* _inr = (cytnx_uint32*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
cuMatMul_kernel<<<Nblocks,512>>>(_out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_i16(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_int16* _out = (cytnx_int16*)out->Mem;
cytnx_int16* _inl = (cytnx_int16*)inl->Mem;
cytnx_int16* _inr = (cytnx_int16*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
cuMatMul_kernel<<<Nblocks,512>>>(_out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_u16(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_uint16* _out = (cytnx_uint16*)out->Mem;
cytnx_uint16* _inl = (cytnx_uint16*)inl->Mem;
cytnx_uint16* _inr = (cytnx_uint16*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
cuMatMul_kernel<<<Nblocks,512>>>(_out,_inl,_inr,Ml,Comm,Nr);
}
void cuMatmul_internal_b(boost::intrusive_ptr<Storage_base> &out, const boost::intrusive_ptr<Storage_base> &inl, const boost::intrusive_ptr<Storage_base> &inr, const cytnx_int32 &Ml, const cytnx_int32 &Comm, const cytnx_int32 &Nr){
cytnx_bool* _out = (cytnx_bool*)out->Mem;
cytnx_bool* _inl = (cytnx_bool*)inl->Mem;
cytnx_bool* _inr = (cytnx_bool*)inr->Mem;
cytnx_uint64 Nblocks = (cytnx_uint64(Ml)*Nr)/512;
if((cytnx_uint64(Ml)*Nr)%512) Nblocks+=1;
cuMatMul_kernel<<<Nblocks,512>>>(_out,_inl,_inr,Ml,Comm,Nr);
}
}
}
|
be3c53ee5145959b563aa4dafe619f5eba03d34e.hip | // !!! This is a file automatically generated by hipify!!!
//////////////////////////////////////////////////////////////////////////////////
//DispROMS_GPU //
//Copyright (C) 2013 Bosserelle //
// //
//This program is free software: you can redistribute it and/or modify //
//it under the terms of the GNU General Public License as published by //
//the Free Software Foundation. //
// //
//This program is distributed in the hope that it will be useful, //
//but WITHOUT ANY WARRANTY; without even the implied warranty of //
//MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //
//GNU General Public License for more details. //
// //
//You should have received a copy of the GNU General Public License //
//along with this program. If not, see <http://www.gnu.org/licenses/>. //
//////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <cmath>
#include <fstream>
#include <netcdf.h>
#include <algorithm>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <math.h>
// includes, GL
#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>
#include <hip/hip_vector_types.h>
#define pi 3.14159265
#include "DispROMS_kernel.cu"
char ncfile[256];
char ncoutfile[256];
int nxiu,nxiv,netau,netav,nl,nt;
float *Uo,*Un;
float *Vo,*Vn;
float *Uo_g,*Un_g,*Ux_g;
float *Vo_g,*Vn_g,*Vx_g;
float *Umask, *Vmask, *Umask_g, *Vmask_g;
float *Nincel,*cNincel,*cTincel;
float *Nincel_g,*cNincel_g,*cTincel_g;
float *distXU, *distYU, *distXV, *distYV;
float *lat_u,*lon_u,*lat_v,*lon_v;
int hdstep,hdstart,hdend;
int lev;
float hddt;
int stp,outstep,nextoutstep,outtype;
float *xp,*yp,*zp,*tp;
float *xl,*yl;
float *xp_g,*yp_g,*zp_g,*tp_g;
float *xl_g,*yl_g;
//particle properties
int npart,backswitch;
float dt,Eh,Ev,minrwdepth;
int GPUDEV=0;
int SEED = 777;
float * d_Rand; //GPU random number
hiprandGenerator_t gen;
hipError_t CUDerr;
hipArray* Ux_gp;
hipArray* Vx_gp;
hipArray* distXU_gp;
hipArray* distYU_gp;
hipArray* distXV_gp;
hipArray* distYV_gp;
hipArray* lon_ugp;
hipArray* lon_vgp;
hipArray* lat_ugp;
hipArray* lat_vgp;
hipChannelFormatDesc channelDescU = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDescV = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDescdXU = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDescdXV = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDescdYU = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDescdYV = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDesclonu = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDesclonv = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDesclatu = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipChannelFormatDesc channelDesclatv = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
/////////////////////////////////
//Define external functions
////////////////////////////////
void writexyz(float * x, float * y, float * z,float *t,float *xl,float *yl, int npart,char outfile[]);
void readgridsize(char ncfile[],int &nxiu,int &nxiv,int &netau,int &netav,int &nl,int &nt);
void readHDstep(char ncfile[],int nxiu,int nxiv,int netau,int netav,int nl,int nt, int hdstep,int lev,float *&Uo,float *&Vo);
void readlatlon(char ncfile[],int nxiu,int nxiv,int netau,int netav,float *&lat_u,float *&lon_u,float *&lat_v,float *&lon_v);
void readUVmask(char ncfile[],int nxiu,int nxiv,int netau,int netav,float *&Uo,float *&Vo);
void creatncfile(char outfile[], int nx,int ny,float *xval, float *yval,float totaltime,float *Nincel,float *cNincel,float *cTincel);
void writestep2nc(char outfile[], int nx,int ny,float totaltime,float *Nincel,float *cNincel,float * cTincel);
template <class T> const T& min (const T& a, const T& b);
template <class T> const T& max (const T& a, const T& b);
template <class T> const T& round(const T& a);
void CUDA_CHECK(hipError_t CUDerr)
{
if( hipSuccess != CUDerr) {
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, hipGetErrorString( CUDerr) );
exit(EXIT_FAILURE);
}
}
void runCuda(void)
{
float data_nu=netau*nxiu;
float data_nv=netav*nxiv;
dim3 blockDimHD(16, 1, 1);
dim3 gridDimHD(ceil(max(netau,netav)*max(nxiu,nxiv) / (float)blockDimHD.x), 1, 1);
if(stp*dt>=hddt*(hdstep-hdstart+1))//Not sure about the +1
{
//Read next step
hdstep++;
int steptoread=hdstep;
if (backswitch>0)
{
steptoread=hdend-hdstep;
}
readHDstep(ncfile,nxiu,nxiv,netau,netav,nl,nt,steptoread,lev,Un,Vn);
hipLaunchKernelGGL(( NextHDstep), dim3(gridDimHD), dim3(blockDimHD), 0, 0, data_nu,Uo_g,Un_g);
CUDA_CHECK( hipDeviceSynchronize() );
hipLaunchKernelGGL(( NextHDstep), dim3(gridDimHD), dim3(blockDimHD), 0, 0, data_nv,Vo_g,Vn_g);
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipMemcpy(Un_g, Un, data_nu*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Vn_g, Vn, data_nv*sizeof(float ), hipMemcpyHostToDevice) );
}
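    // HD_interp (defined in DispROMS_kernel.cu) interpolates the velocity
    // fields between the Uo/Vo and Un/Vn slices at the current particle
    // time; the blended Ux/Vx fields are then copied into the texture
    // arrays below so the particle kernels get hardware bilinear lookups.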
hipLaunchKernelGGL(( ResetNincel), dim3(gridDimHD), dim3(blockDimHD), 0, 0, data_nu,Nincel_g);
CUDA_CHECK( hipDeviceSynchronize() );
int interpstep=hdstep-hdstart+1;
hipLaunchKernelGGL(( HD_interp), dim3(gridDimHD), dim3(blockDimHD), 0, 0, data_nu,stp,backswitch,interpstep,dt,hddt/*,Umask_g*/,Uo_g,Un_g,Ux_g);
CUDA_CHECK( hipDeviceSynchronize() );
hipLaunchKernelGGL(( HD_interp), dim3(gridDimHD), dim3(blockDimHD), 0, 0, data_nv,stp,backswitch,interpstep,dt,hddt/*,Vmask_g*/,Vo_g,Vn_g,Vx_g);
CUDA_CHECK( hipDeviceSynchronize() );
CUDA_CHECK( hipMemcpyToArray( Ux_gp, 0, 0, Ux_g, data_nu* sizeof(float), hipMemcpyDeviceToDevice));
CUDA_CHECK( hipMemcpyToArray( Vx_gp, 0, 0, Vx_g, data_nv* sizeof(float), hipMemcpyDeviceToDevice));
//Generate some random numbers
// Set seed
//hiprandSetPseudoRandomGeneratorSeed(gen, SEED);
// Generate n floats on device
hiprandGenerateUniform(gen, d_Rand, npart);
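    // One uniform draw per particle; updatepartpos consumes d_Rand together
    // with the horizontal diffusivity Eh for the random-walk displacement.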
//run the model
int nbblocks=npart/256;
dim3 blockDim(256, 1, 1);
dim3 gridDim(npart / blockDim.x, 1, 1);
//Calculate particle step
hipLaunchKernelGGL(( updatepartpos), dim3(gridDim), dim3(blockDim), 0, 0, npart,dt,Eh,d_Rand,xl_g, yl_g,zp_g,tp_g);
CUDA_CHECK( hipDeviceSynchronize() );
hipLaunchKernelGGL(( ij2lonlat), dim3(gridDim), dim3(blockDim), 0, 0, npart,xl_g,yl_g,xp_g,yp_g);
CUDA_CHECK( hipDeviceSynchronize() );
hipLaunchKernelGGL(( CalcNincel), dim3(gridDim), dim3(blockDim), 0, 0, npart,nxiu,netau,xl_g, yl_g,tp_g,Nincel_g,cNincel_g,cTincel_g);
CUDA_CHECK( hipDeviceSynchronize() );
}
int main(int argc, char **argv)
{
//////////////////////////////////////////////////////
///// Read Operational file /////
//////////////////////////////////////////////////////
char opfile[]="Disp_G3.dat";
//char hdfile[256];
char seedfile[256];
float xcenter;
float ycenter;
float LL;
float HH;
///////////////////////////////////////////////////////////
//Read Operational file
///////////////////////////////////////////////////////////
FILE * fop;
fop=fopen(opfile,"r");
fscanf(fop,"%*s %s\t%*s",&ncfile);
fscanf(fop,"%d\t%*s",&GPUDEV);
fscanf(fop,"%f\t%*s",&hddt);
fscanf(fop,"%d\t%*s",&lev);
fscanf(fop,"%d,%d\t%*s",&hdstart,&hdend);
fscanf(fop,"%u\t%*s",&npart);
fscanf(fop,"%d\t%*s",&backswitch);
fscanf(fop,"%f\t%*s",&dt);
fscanf(fop,"%f\t%*s",&Eh);
fscanf(fop,"%f\t%*s",&Ev);
fscanf(fop,"%f\t%*s",&minrwdepth);
fscanf(fop,"%f,%f\t%*s",&xcenter,&ycenter);
fscanf(fop,"%f,%f\t%*s",&LL,&HH);
fscanf(fop,"%s\t%*s",&seedfile);
fscanf(fop,"%d\t%*s",&outtype);
fscanf(fop,"%d\t%*s",&outstep);
fscanf(fop,"%s\t%*s",&ncoutfile);
fclose(fop);
printf(" ncfile:%s\n Hddt:%f\t lev:%d\n Hdstart:%d \t Hdstop:%d\n npart:%d\n dt:%f\n Eh:%f\t Ev:%f\n Mindepth:%f\n Xcenter:%f\t Ycenter:%f\n LL:%f\t HH:%f\n Seed file:%s\n",ncfile,hddt,lev,hdstart,hdend,npart,dt,Eh,Ev,minrwdepth,xcenter,ycenter,LL,HH,seedfile);
//read the dimensions of the grid, levels and time
printf("Read nc file dimensions... ");
readgridsize(ncfile,nxiu,nxiv,netau,netav,nl,nt);
printf("...done\n");
///////////////////////////
//Allocate Memory on CPU //
///////////////////////////
printf("Allocate CPU memory... ");
//Vel ARRAYS
Uo= (float *)malloc(nxiu*netau*sizeof(float ));
Un= (float *)malloc(nxiu*netau*sizeof(float ));
Vo= (float *)malloc(nxiv*netav*sizeof(float ));
Vn= (float *)malloc(nxiv*netav*sizeof(float ));
Umask= (float *)malloc(nxiu*netau*sizeof(float ));
Vmask= (float *)malloc(nxiv*netav*sizeof(float ));
//Lat and Long for each array
lat_u= (float *)malloc(nxiu*netau*sizeof(float ));
lon_u= (float *)malloc(nxiu*netau*sizeof(float ));
lat_v= (float *)malloc(nxiv*netav*sizeof(float ));
lon_v= (float *)malloc(nxiv*netav*sizeof(float ));
//Distance arrays
distXU=(float *)malloc(nxiu*netau*sizeof(float ));
distYU=(float *)malloc(nxiu*netau*sizeof(float ));
distXV=(float *)malloc(nxiv*netav*sizeof(float ));
distYV=(float *)malloc(nxiv*netav*sizeof(float ));
//particles
xp = (float *)malloc(npart*sizeof(float));
yp = (float *)malloc(npart*sizeof(float));
zp = (float *)malloc(npart*sizeof(float));
tp = (float *)malloc(npart*sizeof(float));
xl = (float *)malloc(npart*sizeof(float));
yl = (float *)malloc(npart*sizeof(float));
//Nincel
Nincel= (float *)malloc(nxiu*netau*sizeof(float ));
cNincel= (float *)malloc(nxiu*netau*sizeof(float ));
cTincel= (float *)malloc(nxiu*netau*sizeof(float ));
for (int i=0; i<nxiu; i++)
{
for (int j=0; j<netau; j++)
{
Nincel[i+j*nxiu]=0.0f;
}
}
printf("...done\n");
//read lat and lon
printf("Read lat lon array... ");
readlatlon(ncfile,nxiu,nxiv,netau,netav,lat_u,lon_u,lat_v,lon_v);
printf(" ...Calculate distance array...");
float R = 6372797.560856;
//Calculate the distance in metres between cells of the u grid
for (int i=0; i<nxiu-1; i++)
{
for (int j=0; j<netau; j++)
{
//calc distance between each i using haversine formula
float dlat=(lat_u[(i+1)+j*nxiu]-lat_u[i+j*nxiu])*pi/180.0f;
float lat1=lat_u[i+j*nxiu]*pi/180.0f;
float lat2=lat_u[(i+1)+j*nxiu]*pi/180.0f;
float dlon=(lon_u[(i+1)+j*nxiu]-lon_u[i+j*nxiu])*pi/180.0f;
float a=sin(dlat/2)*sin(dlat/2)+cos(lat1)*cos(lat2)*sin(dlon/2)*sin(dlon/2);
float c=2*atan2f(sqrtf(a),sqrtf(1-a));
distXU[i+j*nxiu]=c*R;
}
}
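    /*
     * Haversine distance used above and in the three loops that follow:
     *   a = sin^2(dlat/2) + cos(lat1)*cos(lat2)*sin^2(dlon/2)
     *   c = 2 * atan2(sqrt(a), sqrt(1 - a))
     *   d = R * c,   with R the Earth radius in metres.
     */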
for (int i=0; i<nxiu; i++)
{
for (int j=0; j<netau-1; j++)
{
//calc distance between each j using haversine formula
float dlat=(lat_u[i+(j+1)*nxiu]-lat_u[i+j*nxiu])*pi/180.0f;
float lat1=lat_u[i+j*nxiu]*pi/180.0f;
float lat2=lat_u[i+(j+1)*nxiu]*pi/180.0f;
float dlon=(lon_u[i+(j+1)*nxiu]-lon_u[i+j*nxiu])*pi/180.0f;
float a=sin(dlat/2)*sin(dlat/2)+cos(lat1)*cos(lat2)*sin(dlon/2)*sin(dlon/2);
float c=2*atan2f(sqrtf(a),sqrtf(1-a));
distYU[i+j*nxiu]=c*R;
}
}
//fill in boundaries
for (int j=0; j<netau; j++)
{
//
distXU[nxiu-1+j*nxiu]=distXU[nxiu-2+j*nxiu];
}
for (int i=0; i<nxiu; i++)
{
//
distYU[i+(netau-1)*nxiu]=distYU[i+(netau-2)*nxiu];
}
//Vdirection
for (int i=0; i<nxiu-1; i++)
{
for (int j=0; j<netau; j++)
{
//calc distance between each i using haversine formula
float dlat=(lat_v[(i+1)+j*nxiu]-lat_v[i+j*nxiu])*pi/180.0f;
float lat1=lat_v[i+j*nxiu]*pi/180.0f;
float lat2=lat_v[(i+1)+j*nxiu]*pi/180.0f;
float dlon=(lon_v[(i+1)+j*nxiu]-lon_v[i+j*nxiu])*pi/180.0f;
float a=sin(dlat/2)*sin(dlat/2)+cos(lat1)*cos(lat2)*sin(dlon/2)*sin(dlon/2);
float c=2*atan2f(sqrtf(a),sqrtf(1-a));
distXV[i+j*nxiu]=c*R;
}
}
for (int i=0; i<nxiu; i++)
{
for (int j=0; j<netau-1; j++)
{
//calc distance between each j using haversine formula
float dlat=(lat_v[i+(j+1)*nxiu]-lat_v[i+j*nxiu])*pi/180.0f;
float lat1=lat_v[i+j*nxiu]*pi/180.0f;
float lat2=lat_v[i+(j+1)*nxiu]*pi/180.0f;
float dlon=(lon_v[i+(j+1)*nxiu]-lon_v[i+j*nxiu])*pi/180.0f;
float a=sin(dlat/2)*sin(dlat/2)+cos(lat1)*cos(lat2)*sin(dlon/2)*sin(dlon/2);
float c=2*atan2f(sqrtf(a),sqrtf(1-a));
distYV[i+j*nxiu]=c*R;
}
}
//fill in boundaries
for (int j=0; j<netau; j++)
{
//
distXV[nxiu-1+j*nxiu]=distXV[nxiu-2+j*nxiu];
}
for (int i=0; i<nxiu; i++)
{
//
distYV[i+(netau-1)*nxiu]=distYV[i+(netau-2)*nxiu];
}
printf("...done\n");
//Calculate first HD step
//outstep=10;
stp=0;//hdstart*hddt/dt;
hdstep=hdstart;
nextoutstep=outstep+stp;
//printf("HD step:%d\n ",hdstep);
if (hdend==0)
{
hdend=nt-1;
}
int steptoread=hdstep;
if (backswitch>0)
{
steptoread=hdend-hdstep;
}
//////////////////////////////
//Read first step in Hd model
///////////////////////////////
//Read U and V mask
//readUVmask(ncfile,nxiu,nxiv,netau,netav,Umask,Vmask);
//printf("Read Hd model first step... ");
readHDstep(ncfile,nxiu,nxiv,netau,netav,nl,nt,steptoread,lev,Uo,Vo);
//printf("...done\n");
//////////////////////////////
// Init Particle position
//////////////////////////////
char noseedfile[] = "seed";
if (strcmp(seedfile,noseedfile)!=0)
{
printf("...reading seed file.\n");
FILE * fsd;
int nseedpos;
//read input data:
fsd=fopen(seedfile,"r");
fscanf(fsd,"%u",&nseedpos);
for(int ppos=0; ppos<min(nseedpos,npart); ppos++)
{
fscanf(fsd,"%f %f %f %f %f %f",&xp[ppos],&yp[ppos],&zp[ppos],&tp[ppos],&xl[ppos],&yl[ppos]);
}
if (nseedpos<npart)
{
printf("WARNING there are less seed positions in file than particles seed position will be repeated");
for (int rppos=0; rppos<(npart-nseedpos);rppos++)
{
xp[nseedpos+rppos]=xp[rppos];
yp[nseedpos+rppos]=yp[rppos];
zp[nseedpos+rppos]=zp[rppos];
tp[nseedpos+rppos]=tp[rppos];
xl[nseedpos+rppos]=xl[rppos];
yl[nseedpos+rppos]=yl[rppos];
}
}
fclose(fsd);
}
else
{
printf("Generating particle initial position in CPU mem...");
//Generating input data on CPU
//Set initial position for particle
float dlat=0.1;
float dlon=0.1;
float a,c,d;
float R=6371000;
float dlatrad,dlonrad,ycenterrad;
dlatrad=dlat*pi/180;
dlonrad=dlon*pi/180;
ycenterrad=ycenter*pi/180;
a=pow(sin(dlatrad/2),2)+cos(ycenterrad)*cos(ycenterrad+dlatrad)*pow(sin(dlonrad/2),2);
c=2*atan2(sqrt(a),sqrt(1-a));
d=R*c;
LL=LL*dlon/d;
HH=HH*dlat/d;
float ddx=sqrt(LL*HH/npart);
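        // ddx is the seed spacing in degrees (LL and HH were rescaled from
        // metres to degrees just above) chosen so that roughly npart
        // particles tile the LL x HH rectangle.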
for(int i = 0; i < npart; i++)
{
// initialize random seed:
float dist;
int minki=0;
int minkj=0;
int test;
float mindist=100000;
zp[i] = 0.5f;
tp[i] = 0.0f;
xp[i]=(xcenter-LL/2)+ddx*(i - floor(((float)i)/round(LL/ddx))*round(LL/ddx));
//printf("xp[%d]=%f\n",i,xp[i]);
yp[i]=(ycenter-HH/2)+ddx*(floor((float)i/(LL/ddx)));
//printf("yp[%d]=%f\n",i,yp[i]);
//
for (int kj=0;kj<netau-1;kj++)
{
for (int ki=0; ki<nxiu-1;ki++)
{
dist=sqrt((lon_u[ki+kj*nxiu]-xp[i])*(lon_u[ki+kj*nxiu]-xp[i])+(lat_u[ki+kj*nxiu]-yp[i])*(lat_u[ki+kj*nxiu]-yp[i]));
if (dist<mindist)
{
mindist=dist;
minki=ki;
minkj=kj;
}
}
}
//Yes, I know: pretty lazy, but it is only for quick seeding. If you want it done properly, use a seed file...
xp[i]=lon_u[minki+minkj*nxiu];
yp[i]=lat_u[minki+minkj*nxiu];
xl[i]=minki;
yl[i]=minkj;
//xlv[i]=minki+0.5; in i and j coordinates it is just off by half a node
//ylu[i]=minkj-0.5;
}
}
printf(" ...done\n");
/////////////////////////////
//Prepare GPU
////////////////////////////
// Init GPU data
int GPUDEVICE=GPUDEV;
CUDA_CHECK(hipSetDevice(GPUDEVICE));
//CUT_DEVICE_INIT(argc, argv);
/////////////////////////////////////
// ALLOCATE GPU MEMORY
/////////////////////////////////
printf("Allocating GPU memory... ");
float DATA_SZ=npart*sizeof(float);
CUDA_CHECK(hipMalloc((void **)&xp_g, DATA_SZ));
CUDA_CHECK(hipMalloc((void **)&yp_g, DATA_SZ));
CUDA_CHECK(hipMalloc((void **)&zp_g, DATA_SZ));
CUDA_CHECK(hipMalloc((void **)&tp_g, DATA_SZ));
CUDA_CHECK(hipMalloc((void **)&xl_g, DATA_SZ));
CUDA_CHECK(hipMalloc((void **)&yl_g, DATA_SZ));
CUDA_CHECK(hipMalloc((void **)&Uo_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&Un_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&Ux_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&Vo_g, netav*nxiv* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&Vn_g, netav*nxiv* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&Vx_g, netav*nxiv* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&Nincel_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&cNincel_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&cTincel_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&Umask_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&Vmask_g, netav*nxiv* sizeof(float)));
printf(" ...done\n");
printf("Transfert vectors to GPU memory... ");
CUDA_CHECK( hipMemcpy(Uo_g, Uo, netau*nxiu*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Un_g, Uo, netau*nxiu*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Ux_g, Uo, netau*nxiu*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Vo_g, Vo, netav*nxiv*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Vn_g, Vo, netav*nxiv*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Vx_g, Vo, netav*nxiv*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Umask_g, Umask, netau*nxiu*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Vmask_g, Vmask, netav*nxiv*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(Nincel_g, Nincel, netau*nxiu*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(cNincel_g, Nincel, netau*nxiu*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(cTincel_g, Nincel, netau*nxiu*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(xp_g, xp, npart*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(yp_g, yp, npart*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(zp_g, zp, npart*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(tp_g, tp, npart*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(xl_g, xl, npart*sizeof(float ), hipMemcpyHostToDevice) );
CUDA_CHECK( hipMemcpy(yl_g, yl, npart*sizeof(float ), hipMemcpyHostToDevice) );
// Loading random number generator
hiprandCreateGenerator(&gen,HIPRAND_RNG_PSEUDO_DEFAULT);
CUDA_CHECK( hipMalloc((void **)&d_Rand,npart*sizeof(float)) );
printf(" ...done\n");
printf("Create textures on GPU memory... ");
// Copy velocity arrays
CUDA_CHECK( hipMallocArray( &Ux_gp, &channelDescU, nxiu,netau ));
CUDA_CHECK( hipMallocArray( &Vx_gp, &channelDescV, nxiv,netav ));
CUDA_CHECK( hipMemcpyToArray( Ux_gp, 0, 0, Uo, netau*nxiu* sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK( hipMemcpyToArray( Vx_gp, 0, 0, Vo, netav*nxiv* sizeof(float), hipMemcpyHostToDevice));
texU.addressMode[0] = hipAddressModeWrap;
texU.addressMode[1] = hipAddressModeWrap;
texU.filterMode = hipFilterModeLinear;
texU.normalized = false;
CUDA_CHECK( hipBindTextureToArray( texU, Ux_gp, channelDescU));
texV.addressMode[0] = hipAddressModeWrap;
texV.addressMode[1] = hipAddressModeWrap;
texV.filterMode = hipFilterModeLinear;
texV.normalized = false;
CUDA_CHECK( hipBindTextureToArray( texV, Vx_gp, channelDescV));
CUDA_CHECK( hipMallocArray( &distXU_gp, &channelDescdXU, nxiu,netau ));
//CUDA_CHECK( hipMallocArray( &distXV_gp, &channelDescdXV, netav, nxiv ));
//CUDA_CHECK( hipMallocArray( &distYU_gp, &channelDescdYU, netau, nxiu ));
CUDA_CHECK( hipMallocArray( &distYV_gp, &channelDescdYV, nxiv,netav ));
CUDA_CHECK( hipMallocArray( &lon_ugp, &channelDesclonu, nxiu,netau ));
CUDA_CHECK( hipMallocArray( &lat_ugp, &channelDesclatu, nxiu, netau ));
//CUDA_CHECK( hipMallocArray( &lon_vgp, &channelDesclonv, netav, nxiv ));
//CUDA_CHECK( hipMallocArray( &lat_vgp, &channelDesclatv, netav, nxiv ));
CUDA_CHECK( hipMemcpyToArray(distXU_gp, 0, 0, distXU, netau*nxiu* sizeof(float), hipMemcpyHostToDevice));
//CUDA_CHECK( hipMemcpyToArray(distYU_gp, 0, 0, distYU, netau*nxiu* sizeof(float), hipMemcpyHostToDevice));
//CUDA_CHECK( hipMemcpyToArray(distXV_gp, 0, 0, distXV, netav*nxiv* sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK( hipMemcpyToArray(distYV_gp, 0, 0, distYV, netav*nxiv* sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK( hipMemcpyToArray(lon_ugp, 0, 0, lon_u, netau*nxiu* sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK( hipMemcpyToArray(lat_ugp, 0, 0, lat_u, netau*nxiu* sizeof(float), hipMemcpyHostToDevice));
//CUDA_CHECK( hipMemcpyToArray(lon_vgp, 0, 0, lon_v, netav*nxiv* sizeof(float), hipMemcpyHostToDevice));
//CUDA_CHECK( hipMemcpyToArray(lat_vgp, 0, 0, lat_v, netav*nxiv* sizeof(float), hipMemcpyHostToDevice));
texlonu.addressMode[0] = hipAddressModeWrap;
texlonu.addressMode[1] = hipAddressModeWrap;
texlonu.filterMode = hipFilterModeLinear;
texlonu.normalized = false;
CUDA_CHECK( hipBindTextureToArray( texlonu, lon_ugp, channelDesclonu));
texlatu.addressMode[0] = hipAddressModeWrap;
texlatu.addressMode[1] = hipAddressModeWrap;
texlatu.filterMode = hipFilterModeLinear;
texlatu.normalized = false;
CUDA_CHECK( hipBindTextureToArray( texlatu, lat_ugp, channelDesclatu));
texdXU.addressMode[0] = hipAddressModeWrap;
texdXU.addressMode[1] = hipAddressModeWrap;
texdXU.filterMode = hipFilterModeLinear;
texdXU.normalized = false;
CUDA_CHECK( hipBindTextureToArray( texdXU, distXU_gp, channelDescdXU));
texdYV.addressMode[0] = hipAddressModeWrap;
texdYV.addressMode[1] = hipAddressModeWrap;
texdYV.filterMode = hipFilterModeLinear;
texdYV.normalized = false;
CUDA_CHECK( hipBindTextureToArray( texdYV, distYV_gp, channelDescdYV));
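    // With filterMode linear and normalized == false, these textures give
    // hardware bilinear interpolation of the velocity and metric fields at
    // fractional (i, j) particle positions using raw grid indices.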
printf(" ...done\n");
//int nbblocks=npart/256;
//dim3 blockDim(256, 1, 1);
//dim3 gridDim(npart / blockDim.x, 1, 1);
//ij2lonlat<<<gridDim, blockDim, 0>>>(npart,xl_g,yl_g,xp_g,yp_g);
//CUDA_CHECK( hipDeviceSynchronize() );
//CUDA_CHECK( hipMemcpy(xp, xp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
//CUDA_CHECK( hipMemcpy(yp, yp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
char fileoutn[15];
sprintf (fileoutn, "Part_%d.xyz", stp);
writexyz(xp,yp,zp,tp,xl,yl,npart,fileoutn);
creatncfile(ncoutfile,nxiu,netau,lon_u,lat_u,stp*dt,Nincel,Nincel,Nincel);
printf("Running Model...\n");
//Run the model without the GL stuff
while (stp*dt<=hddt*hdend)
{
runCuda();
if (stp==nextoutstep)
{
char fileoutn[15];
nextoutstep=nextoutstep+outstep;
switch (outtype)
{
case 1:
sprintf (fileoutn, "Part_%d.xyz", stp);
//Get the results to plot.
CUDA_CHECK( hipMemcpy(xp, xp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(yp, yp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
//CUDA_CHECK( hipMemcpy(zp, zp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(tp, tp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(xl, xl_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(yl, yl_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
//printf("saving Part_%d.xyz file", stp);
writexyz(xp,yp,zp,tp,xl,yl,npart,fileoutn);
break;
case 2:
CUDA_CHECK( hipMemcpy(Nincel, Nincel_g, nxiu*netau*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(cNincel, cNincel_g, nxiu*netau*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(cTincel, cTincel_g, nxiu*netau*sizeof(float), hipMemcpyDeviceToHost) );
writestep2nc(ncoutfile,nxiu,netau,stp*dt,Nincel,cNincel,cTincel);
break;
case 3:
sprintf (fileoutn, "Part_%d.xyz", stp);
//Get the results to plot.
CUDA_CHECK( hipMemcpy(xp, xp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(yp, yp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
//CUDA_CHECK( hipMemcpy(zp, zp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(tp, tp_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(xl, xl_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(yl, yl_g, npart*sizeof(float), hipMemcpyDeviceToHost) );
//printf("saving Part_%d.xyz file", stp);
writexyz(xp,yp,zp,tp,xl,yl,npart,fileoutn);
CUDA_CHECK( hipMemcpy(Nincel, Nincel_g, nxiu*netau*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(cNincel, cNincel_g, nxiu*netau*sizeof(float), hipMemcpyDeviceToHost) );
CUDA_CHECK( hipMemcpy(cTincel, cTincel_g, nxiu*netau*sizeof(float), hipMemcpyDeviceToHost) );
writestep2nc(ncoutfile,nxiu,netau,stp*dt,Nincel,cNincel,cTincel);
break;
}
}
stp++;
}
hipDeviceReset();
}
| be3c53ee5145959b563aa4dafe619f5eba03d34e.cu | //////////////////////////////////////////////////////////////////////////////////
//DispROMS_GPU //
//Copyright (C) 2013 Bosserelle //
// //
//This program is free software: you can redistribute it and/or modify //
//it under the terms of the GNU General Public License as published by //
//the Free Software Foundation. //
// //
//This program is distributed in the hope that it will be useful, //
//but WITHOUT ANY WARRANTY; without even the implied warranty of //
//MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //
//GNU General Public License for more details. //
// //
//You should have received a copy of the GNU General Public License //
//along with this program. If not, see <http://www.gnu.org/licenses/>. //
//////////////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <cmath>
#include <fstream>
#include <netcdf.h>
#include <algorithm>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <math.h>
// includes, GL
#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>
#include <vector_types.h>
#define pi 3.14159265
#include "DispROMS_kernel.cu"
char ncfile[256];
char ncoutfile[256];
int nxiu,nxiv,netau,netav,nl,nt;
float *Uo,*Un;
float *Vo,*Vn;
float *Uo_g,*Un_g,*Ux_g;
float *Vo_g,*Vn_g,*Vx_g;
float *Umask, *Vmask, *Umask_g, *Vmask_g;
float *Nincel,*cNincel,*cTincel;
float *Nincel_g,*cNincel_g,*cTincel_g;
float *distXU, *distYU, *distXV, *distYV;
float *lat_u,*lon_u,*lat_v,*lon_v;
int hdstep,hdstart,hdend;
int lev;
float hddt;
int stp,outstep,nextoutstep,outtype;
float *xp,*yp,*zp,*tp;
float *xl,*yl;
float *xp_g,*yp_g,*zp_g,*tp_g;
float *xl_g,*yl_g;
//particle properties
int npart,backswitch;
float dt,Eh,Ev,minrwdepth;
int GPUDEV=0;
int SEED = 777;
float * d_Rand; //GPU random number
curandGenerator_t gen;
cudaError CUDerr;
cudaArray* Ux_gp;
cudaArray* Vx_gp;
cudaArray* distXU_gp;
cudaArray* distYU_gp;
cudaArray* distXV_gp;
cudaArray* distYV_gp;
cudaArray* lon_ugp;
cudaArray* lon_vgp;
cudaArray* lat_ugp;
cudaArray* lat_vgp;
cudaChannelFormatDesc channelDescU = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDescV = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDescdXU = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDescdXV = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDescdYU = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDescdYV = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDesclonu = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDesclonv = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDesclatu = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaChannelFormatDesc channelDesclatv = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
/////////////////////////////////
//Define external functions
////////////////////////////////
void writexyz(float * x, float * y, float * z,float *t,float *xl,float *yl, int npart,char outfile[]);
void readgridsize(char ncfile[],int &nxiu,int &nxiv,int &netau,int &netav,int &nl,int &nt);
void readHDstep(char ncfile[],int nxiu,int nxiv,int netau,int netav,int nl,int nt, int hdstep,int lev,float *&Uo,float *&Vo);
void readlatlon(char ncfile[],int nxiu,int nxiv,int netau,int netav,float *&lat_u,float *&lon_u,float *&lat_v,float *&lon_v);
void readUVmask(char ncfile[],int nxiu,int nxiv,int netau,int netav,float *&Uo,float *&Vo);
void creatncfile(char outfile[], int nx,int ny,float *xval, float *yval,float totaltime,float *Nincel,float *cNincel,float *cTincel);
void writestep2nc(char outfile[], int nx,int ny,float totaltime,float *Nincel,float *cNincel,float * cTincel);
template <class T> const T& min (const T& a, const T& b);
template <class T> const T& max (const T& a, const T& b);
template <class T> const T& round(const T& a);
void CUDA_CHECK(cudaError CUDerr)
{
if( cudaSuccess != CUDerr) {
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( CUDerr) );
exit(EXIT_FAILURE);
}
}
void runCuda(void)
{
float data_nu=netau*nxiu;
float data_nv=netav*nxiv;
dim3 blockDimHD(16, 1, 1);
dim3 gridDimHD(ceil(max(netau,netav)*max(nxiu,nxiv) / (float)blockDimHD.x), 1, 1);
if(stp*dt>=hddt*(hdstep-hdstart+1))//Not sure about the +1
{
//Read next step
hdstep++;
int steptoread=hdstep;
if (backswitch>0)
{
steptoread=hdend-hdstep;
}
readHDstep(ncfile,nxiu,nxiv,netau,netav,nl,nt,steptoread,lev,Un,Vn);
NextHDstep<<<gridDimHD, blockDimHD, 0>>>(data_nu,Uo_g,Un_g);
CUDA_CHECK( cudaThreadSynchronize() );
NextHDstep<<<gridDimHD, blockDimHD, 0>>>(data_nv,Vo_g,Vn_g);
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaMemcpy(Un_g, Un, data_nu*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Vn_g, Vn, data_nv*sizeof(float ), cudaMemcpyHostToDevice) );
}
ResetNincel<<<gridDimHD, blockDimHD, 0>>>(data_nu,Nincel_g);
CUDA_CHECK( cudaThreadSynchronize() );
int interpstep=hdstep-hdstart+1;
HD_interp<<<gridDimHD, blockDimHD, 0>>>(data_nu,stp,backswitch,interpstep,dt,hddt/*,Umask_g*/,Uo_g,Un_g,Ux_g);
CUDA_CHECK( cudaThreadSynchronize() );
HD_interp<<<gridDimHD, blockDimHD, 0>>>(data_nv,stp,backswitch,interpstep,dt,hddt/*,Vmask_g*/,Vo_g,Vn_g,Vx_g);
CUDA_CHECK( cudaThreadSynchronize() );
CUDA_CHECK( cudaMemcpyToArray( Ux_gp, 0, 0, Ux_g, data_nu* sizeof(float), cudaMemcpyDeviceToDevice));
CUDA_CHECK( cudaMemcpyToArray( Vx_gp, 0, 0, Vx_g, data_nv* sizeof(float), cudaMemcpyDeviceToDevice));
//Generate some random numbers
// Set seed
//curandSetPseudoRandomGeneratorSeed(gen, SEED);
// Generate n floats on device
curandGenerateUniform(gen, d_Rand, npart);
//run the model
int nbblocks=npart/256;
dim3 blockDim(256, 1, 1);
dim3 gridDim(npart / blockDim.x, 1, 1);
//Calculate particle step
updatepartpos<<<gridDim, blockDim, 0>>>(npart,dt,Eh,d_Rand,xl_g, yl_g,zp_g,tp_g);
CUDA_CHECK( cudaThreadSynchronize() );
ij2lonlat<<<gridDim, blockDim, 0>>>(npart,xl_g,yl_g,xp_g,yp_g);
CUDA_CHECK( cudaThreadSynchronize() );
CalcNincel<<<gridDim, blockDim, 0>>>(npart,nxiu,netau,xl_g, yl_g,tp_g,Nincel_g,cNincel_g,cTincel_g);
CUDA_CHECK( cudaThreadSynchronize() );
}
int main(int argc, char **argv)
{
//////////////////////////////////////////////////////
///// Read Operational file /////
//////////////////////////////////////////////////////
char opfile[]="Disp_G3.dat";
//char hdfile[256];
char seedfile[256];
float xcenter;
float ycenter;
float LL;
float HH;
///////////////////////////////////////////////////////////
//Read Operational file
///////////////////////////////////////////////////////////
FILE * fop;
fop=fopen(opfile,"r");
fscanf(fop,"%*s %s\t%*s",&ncfile);
fscanf(fop,"%d\t%*s",&GPUDEV);
fscanf(fop,"%f\t%*s",&hddt);
fscanf(fop,"%d\t%*s",&lev);
fscanf(fop,"%d,%d\t%*s",&hdstart,&hdend);
fscanf(fop,"%u\t%*s",&npart);
fscanf(fop,"%d\t%*s",&backswitch);
fscanf(fop,"%f\t%*s",&dt);
fscanf(fop,"%f\t%*s",&Eh);
fscanf(fop,"%f\t%*s",&Ev);
fscanf(fop,"%f\t%*s",&minrwdepth);
fscanf(fop,"%f,%f\t%*s",&xcenter,&ycenter);
fscanf(fop,"%f,%f\t%*s",&LL,&HH);
fscanf(fop,"%s\t%*s",&seedfile);
fscanf(fop,"%d\t%*s",&outtype);
fscanf(fop,"%d\t%*s",&outstep);
fscanf(fop,"%s\t%*s",&ncoutfile);
fclose(fop);
printf(" ncfile:%s\n Hddt:%f\t lev:%d\n Hdstart:%d \t Hdstop:%d\n npart:%d\n dt:%f\n Eh:%f\t Ev:%f\n Mindepth:%f\n Xcenter:%f\t Ycenter:%f\n LL:%f\t HH:%f\n Seed file:%s\n",ncfile,hddt,lev,hdstart,hdend,npart,dt,Eh,Ev,minrwdepth,xcenter,ycenter,LL,HH,seedfile);
//read the dimensions of the grid, levels and time
printf("Read nc file dimensions... ");
readgridsize(ncfile,nxiu,nxiv,netau,netav,nl,nt);
printf("...done\n");
///////////////////////////
//Allocate Memory on CPU //
///////////////////////////
printf("Allocate CPU memory... ");
//Vel ARRAYS
Uo= (float *)malloc(nxiu*netau*sizeof(float ));
Un= (float *)malloc(nxiu*netau*sizeof(float ));
Vo= (float *)malloc(nxiv*netav*sizeof(float ));
Vn= (float *)malloc(nxiv*netav*sizeof(float ));
Umask= (float *)malloc(nxiu*netau*sizeof(float ));
Vmask= (float *)malloc(nxiv*netav*sizeof(float ));
//Lat and Long for each array
lat_u= (float *)malloc(nxiu*netau*sizeof(float ));
lon_u= (float *)malloc(nxiu*netau*sizeof(float ));
lat_v= (float *)malloc(nxiv*netav*sizeof(float ));
lon_v= (float *)malloc(nxiv*netav*sizeof(float ));
//Distance arrays
distXU=(float *)malloc(nxiu*netau*sizeof(float ));
distYU=(float *)malloc(nxiu*netau*sizeof(float ));
distXV=(float *)malloc(nxiv*netav*sizeof(float ));
distYV=(float *)malloc(nxiv*netav*sizeof(float ));
//particles
xp = (float *)malloc(npart*sizeof(float));
yp = (float *)malloc(npart*sizeof(float));
zp = (float *)malloc(npart*sizeof(float));
tp = (float *)malloc(npart*sizeof(float));
xl = (float *)malloc(npart*sizeof(float));
yl = (float *)malloc(npart*sizeof(float));
//Nincel
Nincel= (float *)malloc(nxiu*netau*sizeof(float ));
cNincel= (float *)malloc(nxiu*netau*sizeof(float ));
cTincel= (float *)malloc(nxiu*netau*sizeof(float ));
for (int i=0; i<nxiu; i++)
{
for (int j=0; j<netau; j++)
{
Nincel[i+j*nxiu]=0.0f;
}
}
printf("...done\n");
//read lat and lon
printf("Read lat lon array... ");
readlatlon(ncfile,nxiu,nxiv,netau,netav,lat_u,lon_u,lat_v,lon_v);
printf(" ...Calculate distance array...");
float R = 6372797.560856;
//Calculate the distance in metres between cells of the u grid
for (int i=0; i<nxiu-1; i++)
{
for (int j=0; j<netau; j++)
{
//calc distance between each i using haversine formula
float dlat=(lat_u[(i+1)+j*nxiu]-lat_u[i+j*nxiu])*pi/180.0f;
float lat1=lat_u[i+j*nxiu]*pi/180.0f;
float lat2=lat_u[(i+1)+j*nxiu]*pi/180.0f;
float dlon=(lon_u[(i+1)+j*nxiu]-lon_u[i+j*nxiu])*pi/180.0f;
float a=sin(dlat/2)*sin(dlat/2)+cos(lat1)*cos(lat2)*sin(dlon/2)*sin(dlon/2);
float c=2*atan2f(sqrtf(a),sqrtf(1-a));
distXU[i+j*nxiu]=c*R;
}
}
for (int i=0; i<nxiu; i++)
{
for (int j=0; j<netau-1; j++)
{
//calc distance between each j using haversine formula
float dlat=(lat_u[i+(j+1)*nxiu]-lat_u[i+j*nxiu])*pi/180.0f;
float lat1=lat_u[i+j*nxiu]*pi/180.0f;
float lat2=lat_u[i+(j+1)*nxiu]*pi/180.0f;
float dlon=(lon_u[i+(j+1)*nxiu]-lon_u[i+j*nxiu])*pi/180.0f;
float a=sin(dlat/2)*sin(dlat/2)+cos(lat1)*cos(lat2)*sin(dlon/2)*sin(dlon/2);
float c=2*atan2f(sqrtf(a),sqrtf(1-a));
distYU[i+j*nxiu]=c*R;
}
}
//fill in boundaries
for (int j=0; j<netau; j++)
{
//
distXU[nxiu-1+j*nxiu]=distXU[nxiu-2+j*nxiu];
}
for (int i=0; i<nxiu; i++)
{
//
distYU[i+(netau-1)*nxiu]=distYU[i+(netau-2)*nxiu];
}
//Vdirection
for (int i=0; i<nxiu-1; i++)
{
for (int j=0; j<netau; j++)
{
//calc distance between each i using haversine formula
float dlat=(lat_v[(i+1)+j*nxiu]-lat_v[i+j*nxiu])*pi/180.0f;
float lat1=lat_v[i+j*nxiu]*pi/180.0f;
float lat2=lat_v[(i+1)+j*nxiu]*pi/180.0f;
float dlon=(lon_v[(i+1)+j*nxiu]-lon_v[i+j*nxiu])*pi/180.0f;
float a=sin(dlat/2)*sin(dlat/2)+cos(lat1)*cos(lat2)*sin(dlon/2)*sin(dlon/2);
float c=2*atan2f(sqrtf(a),sqrtf(1-a));
distXV[i+j*nxiu]=c*R;
}
}
for (int i=0; i<nxiu; i++)
{
for (int j=0; j<netau-1; j++)
{
//calc distance between each j using haversine formula
float dlat=(lat_v[i+(j+1)*nxiu]-lat_v[i+j*nxiu])*pi/180.0f;
float lat1=lat_v[i+j*nxiu]*pi/180.0f;
float lat2=lat_v[i+(j+1)*nxiu]*pi/180.0f;
float dlon=(lon_v[i+(j+1)*nxiu]-lon_v[i+j*nxiu])*pi/180.0f;
float a=sin(dlat/2)*sin(dlat/2)+cos(lat1)*cos(lat2)*sin(dlon/2)*sin(dlon/2);
float c=2*atan2f(sqrtf(a),sqrtf(1-a));
distYV[i+j*nxiu]=c*R;
}
}
//fill in boundaries
for (int j=0; j<netau; j++)
{
//
distXV[nxiu-1+j*nxiu]=distXV[nxiu-2+j*nxiu];
}
for (int i=0; i<nxiu; i++)
{
//
distYV[i+(netau-1)*nxiu]=distYV[i+(netau-2)*nxiu];
}
printf("...done\n");
//Calculate first HD step
//outstep=10;
stp=0;//hdstart*hddt/dt;
hdstep=hdstart;
nextoutstep=outstep+stp;
//printf("HD step:%d\n ",hdstep);
if (hdend==0)
{
hdend=nt-1;
}
int steptoread=hdstep;
if (backswitch>0)
{
steptoread=hdend-hdstep;
}
//////////////////////////////
//Read first step in Hd model
///////////////////////////////
//Read U and V mask
//readUVmask(ncfile,nxiu,nxiv,netau,netav,Umask,Vmask);
//printf("Read Hd model first step... ");
readHDstep(ncfile,nxiu,nxiv,netau,netav,nl,nt,steptoread,lev,Uo,Vo);
//printf("...done\n");
//////////////////////////////
// Init Particle position
//////////////////////////////
char noseedfile[] = "seed";
if (strcmp(seedfile,noseedfile)!=0)
{
printf("...reading seed file.\n");
FILE * fsd;
int nseedpos;
//read input data:
fsd=fopen(seedfile,"r");
fscanf(fsd,"%u",&nseedpos);
for(int ppos=0; ppos<min(nseedpos,npart); ppos++)
{
fscanf(fsd,"%f %f %f %f %f %f",&xp[ppos],&yp[ppos],&zp[ppos],&tp[ppos],&xl[ppos],&yl[ppos]);
}
if (nseedpos<npart)
{
printf("WARNING there are less seed positions in file than particles seed position will be repeated");
for (int rppos=0; rppos<(npart-nseedpos);rppos++)
{
xp[nseedpos+rppos]=xp[rppos];
yp[nseedpos+rppos]=yp[rppos];
zp[nseedpos+rppos]=zp[rppos];
tp[nseedpos+rppos]=tp[rppos];
xl[nseedpos+rppos]=xl[rppos];
yl[nseedpos+rppos]=yl[rppos];
}
}
fclose(fsd);
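//Seed file format, as read above: the first line gives the number of seed
//positions, then one "xp yp zp tp xl yl" line per particle, e.g. (illustrative
//values only):
//  3
//  174.51 -36.79 0.5 0.0 10.0 12.0
//  174.52 -36.81 0.5 0.0 11.0 13.0
//  174.53 -36.80 0.5 0.0 12.0 14.0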
}
else
{
printf("Generating particle initial position in CPU mem...");
//Generating input data on CPU
//Set initial position for particle
float dlat=0.1;
float dlon=0.1;
float a,c,d;
float R=6371000;
float dlatrad,dlonrad,ycenterrad;
dlatrad=dlat*pi/180;
dlonrad=dlon*pi/180;
ycenterrad=ycenter*pi/180;
a=pow(sin(dlatrad/2),2)+cos(ycenterrad)*cos(ycenterrad+dlatrad)*pow(sin(dlonrad/2),2);
c=2*atan2(sqrt(a),sqrt(1-a));
d=R*c;
LL=LL*dlon/d;
HH=HH*dlat/d;
float ddx=sqrt(LL*HH/npart);
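//ddx is the particle spacing (in degrees, after the conversion above) that tiles
//the LL x HH rectangle centred on (xcenter,ycenter) with npart roughly evenly
//spaced particles; each particle is then snapped to its nearest u-grid node below.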
for(int i = 0; i < npart; i++)
{
// initialize random seed:
float dist;
int minki=0;
int minkj=0;
int test;
float mindist=100000;
zp[i] = 0.5f;
tp[i] = 0.0f;
xp[i]=(xcenter-LL/2)+ddx*(i - floor(((float)i)/round(LL/ddx))*round(LL/ddx));
//printf("xp[%d]=%f\n",i,xp[i]);
yp[i]=(ycenter-HH/2)+ddx*(floor((float)i/(LL/ddx)));
//printf("yp[%d]=%f\n",i,yp[i]);
//
for (int kj=0;kj<netau-1;kj++)
{
for (int ki=0; ki<nxiu-1;ki++)
{
dist=sqrt((lon_u[ki+kj*nxiu]-xp[i])*(lon_u[ki+kj*nxiu]-xp[i])+(lat_u[ki+kj*nxiu]-yp[i])*(lat_u[ki+kj*nxiu]-yp[i]));
if (dist<mindist)
{
mindist=dist;
minki=ki;
minkj=kj;
}
}
}
//Yes, I know, pretty lazy stuff; it is fine for quick seeding, but if you want proper positions, use a seed file...
xp[i]=lon_u[minki+minkj*nxiu];
yp[i]=lat_u[minki+minkj*nxiu];
xl[i]=minki;
yl[i]=minkj;
//xlv[i]=minki+0.5; in i and j coordinates it is just off by half a node
//ylu[i]=minkj-0.5;
}
}
printf(" ...done\n");
/////////////////////////////
//Prepare GPU
////////////////////////////
// Init GPU data
int GPUDEVICE=GPUDEV;
CUDA_CHECK(cudaSetDevice(GPUDEVICE));
//CUT_DEVICE_INIT(argc, argv);
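//CUDA_CHECK is the project's error-checking wrapper for CUDA runtime calls; a
//typical definition (a sketch only -- the actual macro is defined earlier in the
//source and may differ) is:
//  #define CUDA_CHECK(call) do { cudaError_t err = (call); \
//      if (err != cudaSuccess) { printf("CUDA error %s at %s:%d\n", \
//          cudaGetErrorString(err), __FILE__, __LINE__); exit(EXIT_FAILURE); } } while (0)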
/////////////////////////////////////
// ALLOCATE GPU MEMORY
/////////////////////////////////
printf("Allocating GPU memory... ");
size_t DATA_SZ=npart*sizeof(float);
CUDA_CHECK(cudaMalloc((void **)&xp_g, DATA_SZ));
CUDA_CHECK(cudaMalloc((void **)&yp_g, DATA_SZ));
CUDA_CHECK(cudaMalloc((void **)&zp_g, DATA_SZ));
CUDA_CHECK(cudaMalloc((void **)&tp_g, DATA_SZ));
CUDA_CHECK(cudaMalloc((void **)&xl_g, DATA_SZ));
CUDA_CHECK(cudaMalloc((void **)&yl_g, DATA_SZ));
CUDA_CHECK(cudaMalloc((void **)&Uo_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&Un_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&Ux_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&Vo_g, netav*nxiv* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&Vn_g, netav*nxiv* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&Vx_g, netav*nxiv* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&Nincel_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&cNincel_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&cTincel_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&Umask_g, netau*nxiu* sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&Vmask_g, netav*nxiv* sizeof(float)));
printf(" ...done\n");
printf("Transfert vectors to GPU memory... ");
CUDA_CHECK( cudaMemcpy(Uo_g, Uo, netau*nxiu*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Un_g, Uo, netau*nxiu*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Ux_g, Uo, netau*nxiu*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Vo_g, Vo, netav*nxiv*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Vn_g, Vo, netav*nxiv*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Vx_g, Vo, netav*nxiv*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Umask_g, Umask, netau*nxiu*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Vmask_g, Vmask, netav*nxiv*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(Nincel_g, Nincel, netau*nxiu*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(cNincel_g, Nincel, netau*nxiu*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(cTincel_g, Nincel, netau*nxiu*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(xp_g, xp, npart*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(yp_g, yp, npart*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(zp_g, zp, npart*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(tp_g, tp, npart*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(xl_g, xl, npart*sizeof(float ), cudaMemcpyHostToDevice) );
CUDA_CHECK( cudaMemcpy(yl_g, yl, npart*sizeof(float ), cudaMemcpyHostToDevice) );
// Loading random number generator
curandCreateGenerator(&gen,CURAND_RNG_PSEUDO_DEFAULT);
CUDA_CHECK( cudaMalloc((void **)&d_Rand,npart*sizeof(float)) );
printf(" ...done\n");
printf("Create textures on GPU memory... ");
// Copy velocity arrays
CUDA_CHECK( cudaMallocArray( &Ux_gp, &channelDescU, nxiu,netau ));
CUDA_CHECK( cudaMallocArray( &Vx_gp, &channelDescV, nxiv,netav ));
CUDA_CHECK( cudaMemcpyToArray( Ux_gp, 0, 0, Uo, netau*nxiu* sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK( cudaMemcpyToArray( Vx_gp, 0, 0, Vo, netav*nxiv* sizeof(float), cudaMemcpyHostToDevice));
texU.addressMode[0] = cudaAddressModeWrap;
texU.addressMode[1] = cudaAddressModeWrap;
texU.filterMode = cudaFilterModeLinear;
texU.normalized = false;
CUDA_CHECK( cudaBindTextureToArray( texU, Ux_gp, channelDescU));
texV.addressMode[0] = cudaAddressModeWrap;
texV.addressMode[1] = cudaAddressModeWrap;
texV.filterMode = cudaFilterModeLinear;
texV.normalized = false;
CUDA_CHECK( cudaBindTextureToArray( texV, Vx_gp, channelDescV));
CUDA_CHECK( cudaMallocArray( &distXU_gp, &channelDescdXU, nxiu,netau ));
//CUDA_CHECK( cudaMallocArray( &distXV_gp, &channelDescdXV, netav, nxiv ));
//CUDA_CHECK( cudaMallocArray( &distYU_gp, &channelDescdYU, netau, nxiu ));
CUDA_CHECK( cudaMallocArray( &distYV_gp, &channelDescdYV, nxiv,netav ));
CUDA_CHECK( cudaMallocArray( &lon_ugp, &channelDesclonu, nxiu,netau ));
CUDA_CHECK( cudaMallocArray( &lat_ugp, &channelDesclatu, nxiu, netau ));
//CUDA_CHECK( cudaMallocArray( &lon_vgp, &channelDesclonv, netav, nxiv ));
//CUDA_CHECK( cudaMallocArray( &lat_vgp, &channelDesclatv, netav, nxiv ));
CUDA_CHECK( cudaMemcpyToArray(distXU_gp, 0, 0, distXU, netau*nxiu* sizeof(float), cudaMemcpyHostToDevice));
//CUDA_CHECK( cudaMemcpyToArray(distYU_gp, 0, 0, distYU, netau*nxiu* sizeof(float), cudaMemcpyHostToDevice));
//CUDA_CHECK( cudaMemcpyToArray(distXV_gp, 0, 0, distXV, netav*nxiv* sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK( cudaMemcpyToArray(distYV_gp, 0, 0, distYV, netav*nxiv* sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK( cudaMemcpyToArray(lon_ugp, 0, 0, lon_u, netau*nxiu* sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK( cudaMemcpyToArray(lat_ugp, 0, 0, lat_u, netau*nxiu* sizeof(float), cudaMemcpyHostToDevice));
//CUDA_CHECK( cudaMemcpyToArray(lon_vgp, 0, 0, lon_v, netav*nxiv* sizeof(float), cudaMemcpyHostToDevice));
//CUDA_CHECK( cudaMemcpyToArray(lat_vgp, 0, 0, lat_v, netav*nxiv* sizeof(float), cudaMemcpyHostToDevice));
texlonu.addressMode[0] = cudaAddressModeWrap;
texlonu.addressMode[1] = cudaAddressModeWrap;
texlonu.filterMode = cudaFilterModeLinear;
texlonu.normalized = false;
CUDA_CHECK( cudaBindTextureToArray( texlonu, lon_ugp, channelDesclonu));
texlatu.addressMode[0] = cudaAddressModeWrap;
texlatu.addressMode[1] = cudaAddressModeWrap;
texlatu.filterMode = cudaFilterModeLinear;
texlatu.normalized = false;
CUDA_CHECK( cudaBindTextureToArray( texlatu, lat_ugp, channelDesclatu));
texdXU.addressMode[0] = cudaAddressModeWrap;
texdXU.addressMode[1] = cudaAddressModeWrap;
texdXU.filterMode = cudaFilterModeLinear;
texdXU.normalized = false;
CUDA_CHECK( cudaBindTextureToArray( texdXU, distXU_gp, channelDescdXU));
texdYV.addressMode[0] = cudaAddressModeWrap;
texdYV.addressMode[1] = cudaAddressModeWrap;
texdYV.filterMode = cudaFilterModeLinear;
texdYV.normalized = false;
CUDA_CHECK( cudaBindTextureToArray( texdYV, distYV_gp, channelDescdYV));
printf(" ...done\n");
//int nbblocks=npart/256;
//dim3 blockDim(256, 1, 1);
//dim3 gridDim(npart / blockDim.x, 1, 1);
//ij2lonlat<<<gridDim, blockDim, 0>>>(npart,xl_g,yl_g,xp_g,yp_g);
//CUDA_CHECK( cudaThreadSynchronize() );
//CUDA_CHECK( cudaMemcpy(xp, xp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
//CUDA_CHECK( cudaMemcpy(yp, yp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
char fileoutn[32];
sprintf (fileoutn, "Part_%d.xyz", stp);
writexyz(xp,yp,zp,tp,xl,yl,npart,fileoutn);
creatncfile(ncoutfile,nxiu,netau,lon_u,lat_u,stp*dt,Nincel,Nincel,Nincel);
printf("Running Model...\n");
//Run the model without the GL stuff
while (stp*dt<=hddt*hdend)
{
runCuda();
if (stp==nextoutstep)
{
char fileoutn[32];
nextoutstep=nextoutstep+outstep;
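//outtype: 1 = particle positions to a .xyz file, 2 = cell counts to netCDF, 3 = both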
switch (outtype)
{
case 1:
sprintf (fileoutn, "Part_%d.xyz", stp);
//Get the results to plot.
CUDA_CHECK( cudaMemcpy(xp, xp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(yp, yp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
//CUDA_CHECK( cudaMemcpy(zp, zp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(tp, tp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(xl, xl_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(yl, yl_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
//printf("saving Part_%d.xyz file", stp);
writexyz(xp,yp,zp,tp,xl,yl,npart,fileoutn);
break;
case 2:
CUDA_CHECK( cudaMemcpy(Nincel, Nincel_g, nxiu*netau*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(cNincel, cNincel_g, nxiu*netau*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(cTincel, cTincel_g, nxiu*netau*sizeof(float), cudaMemcpyDeviceToHost) );
writestep2nc(ncoutfile,nxiu,netau,stp*dt,Nincel,cNincel,cTincel);
break;
case 3:
sprintf (fileoutn, "Part_%d.xyz", stp);
//Get the results to plot.
CUDA_CHECK( cudaMemcpy(xp, xp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(yp, yp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
//CUDA_CHECK( cudaMemcpy(zp, zp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(tp, tp_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(xl, xl_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(yl, yl_g, npart*sizeof(float), cudaMemcpyDeviceToHost) );
//printf("saving Part_%d.xyz file", stp);
writexyz(xp,yp,zp,tp,xl,yl,npart,fileoutn);
CUDA_CHECK( cudaMemcpy(Nincel, Nincel_g, nxiu*netau*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(cNincel, cNincel_g, nxiu*netau*sizeof(float), cudaMemcpyDeviceToHost) );
CUDA_CHECK( cudaMemcpy(cTincel, cTincel_g, nxiu*netau*sizeof(float), cudaMemcpyDeviceToHost) );
writestep2nc(ncoutfile,nxiu,netau,stp*dt,Nincel,cNincel,cTincel);
break;
}
}
stp++;
}
cudaThreadExit();
}
|
698a74562f3b8ff76b36358c94bc55790bc73143.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void neg_double(int n,int idx,double *dy,int incy,double *result) {
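   // Grid-stride loop: each thread negates dy[i] into result[i] for every index
   // with i >= idx and i % incy == 0; all other entries of result are untouched.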
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = -dy[i];
}
} | 698a74562f3b8ff76b36358c94bc55790bc73143.cu | extern "C"
__global__ void neg_double(int n,int idx,double *dy,int incy,double *result) {
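   // Grid-stride loop: each thread negates dy[i] into result[i] for every index
   // with i >= idx and i % incy == 0; all other entries of result are untouched.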
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = -dy[i];
}
} |
54a25c1ed9d8cda71027734dda1b1e82bfef65ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/vec_math.hpp>
#include <opencv2/gpu/device/limits.hpp>
namespace cv { namespace gpu {
namespace device
{
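    // Bayer -> BGR(A) demosaicing. Each thread loads a 3x3 neighbourhood of packed
    // pixels (uchar4 = 4 pixels for the 8u kernel, ushort2 = 2 pixels for the 16u
    // kernel) and reconstructs the missing colour channels by averaging neighbours;
    // blue_last and start_with_green select the Bayer pattern phase (BG/GB/RG/GR).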
template <typename D>
__global__ void Bayer2BGR_8u(const PtrStepb src, DevMem2D_<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 2) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
uchar4 patch[3][3];
patch[0][1] = ((const uchar4*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const uchar4*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const uchar4*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[1][1] = ((const uchar4*) src.ptr(s_y))[s_x];
patch[1][0] = ((const uchar4*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const uchar4*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[2][1] = ((const uchar4*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const uchar4*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const uchar4*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res1 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res2 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res3 = VecTraits<D>::all(numeric_limits<uchar>::max());
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].w + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][1].z + patch[2][1].x + patch[2][1].z + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][1].z + patch[2][1].y + 2) >> 2;
const int t4 = (patch[0][1].z + patch[2][1].z + 1) >> 1;
const int t5 = (patch[1][1].y + patch[1][1].w + 1) >> 1;
const int t6 = (patch[0][1].z + patch[0][2].x + patch[2][1].z + patch[2][2].x + 2) >> 2;
const int t7 = (patch[0][1].w + patch[1][1].z + patch[1][2].x + patch[2][1].w + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
res2.x = t5;
res2.y = patch[1][1].z;
res2.z = t4;
res3.x = patch[1][1].w;
res3.y = t7;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
res2.x = t4;
res2.y = patch[1][1].z;
res2.z = t5;
res3.x = t6;
res3.y = t7;
res3.z = patch[1][1].w;
}
}
else
{
const int t0 = (patch[0][0].w + patch[0][1].y + patch[2][0].w + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].w + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][1].z + 1) >> 1;
const int t4 = (patch[0][1].y + patch[0][1].w + patch[2][1].y + patch[2][1].w + 2) >> 2;
const int t5 = (patch[0][1].z + patch[1][1].y + patch[1][1].w + patch[2][1].z + 2) >> 2;
const int t6 = (patch[0][1].w + patch[2][1].w + 1) >> 1;
const int t7 = (patch[1][1].z + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
res2.x = patch[1][1].z;
res2.y = t5;
res2.z = t4;
res3.x = t7;
res3.y = patch[1][1].w;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
res2.x = t4;
res2.y = t5;
res2.z = patch[1][1].z;
res3.x = t6;
res3.y = patch[1][1].w;
res3.z = t7;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 2;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
if (d_x + 2 < dst.cols)
dst(d_y, d_x + 2) = res2;
if (d_x + 3 < dst.cols)
dst(d_y, d_x + 3) = res3;
}
template <typename D>
__global__ void Bayer2BGR_16u(const PtrStepb src, DevMem2D_<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 1) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
ushort2 patch[3][3];
patch[0][1] = ((const ushort2*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const ushort2*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const ushort2*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[1][1] = ((const ushort2*) src.ptr(s_y))[s_x];
patch[1][0] = ((const ushort2*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const ushort2*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[2][1] = ((const ushort2*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const ushort2*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const ushort2*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<ushort>::max());
D res1 = VecTraits<D>::all(numeric_limits<ushort>::max());
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].y + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][2].x + patch[2][1].x + patch[2][2].x + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][2].x + patch[2][1].y + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
}
}
else
{
const int t0 = (patch[0][0].y + patch[0][1].y + patch[2][0].y + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].y + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 1;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
}
template <int cn>
void Bayer2BGR_8u_gpu(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, hipStream_t stream)
{
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 4 * block.x), divUp(dst.rows, block.y));
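            // each thread writes 4 consecutive output pixels in x, hence cols is divided by 4*block.x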
cudaSafeCall( hipFuncSetCacheConfig(Bayer2BGR_8u<dst_t>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( Bayer2BGR_8u<dst_t>), dim3(grid), dim3(block), 0, stream, src, (DevMem2D_<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int cn>
void Bayer2BGR_16u_gpu(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, hipStream_t stream)
{
typedef typename TypeVec<ushort, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 2 * block.x), divUp(dst.rows, block.y));
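            // each thread writes 2 consecutive output pixels in x, hence cols is divided by 2*block.x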
cudaSafeCall( hipFuncSetCacheConfig(Bayer2BGR_16u<dst_t>, hipFuncCachePreferL1) );
hipLaunchKernelGGL(( Bayer2BGR_16u<dst_t>), dim3(grid), dim3(block), 0, stream, src, (DevMem2D_<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template void Bayer2BGR_8u_gpu<3>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, hipStream_t stream);
template void Bayer2BGR_8u_gpu<4>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, hipStream_t stream);
template void Bayer2BGR_16u_gpu<3>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, hipStream_t stream);
template void Bayer2BGR_16u_gpu<4>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, hipStream_t stream);
}
}}
| 54a25c1ed9d8cda71027734dda1b1e82bfef65ba.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <opencv2/gpu/device/common.hpp>
#include <opencv2/gpu/device/vec_traits.hpp>
#include <opencv2/gpu/device/vec_math.hpp>
#include <opencv2/gpu/device/limits.hpp>
namespace cv { namespace gpu {
namespace device
{
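    // Bayer -> BGR(A) demosaicing. Each thread loads a 3x3 neighbourhood of packed
    // pixels (uchar4 = 4 pixels for the 8u kernel, ushort2 = 2 pixels for the 16u
    // kernel) and reconstructs the missing colour channels by averaging neighbours;
    // blue_last and start_with_green select the Bayer pattern phase (BG/GB/RG/GR).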
template <typename D>
__global__ void Bayer2BGR_8u(const PtrStepb src, DevMem2D_<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 2) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
uchar4 patch[3][3];
patch[0][1] = ((const uchar4*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const uchar4*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const uchar4*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[1][1] = ((const uchar4*) src.ptr(s_y))[s_x];
patch[1][0] = ((const uchar4*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const uchar4*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
patch[2][1] = ((const uchar4*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const uchar4*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const uchar4*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 3) >> 2) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res1 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res2 = VecTraits<D>::all(numeric_limits<uchar>::max());
D res3 = VecTraits<D>::all(numeric_limits<uchar>::max());
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].w + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][1].z + patch[2][1].x + patch[2][1].z + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][1].z + patch[2][1].y + 2) >> 2;
const int t4 = (patch[0][1].z + patch[2][1].z + 1) >> 1;
const int t5 = (patch[1][1].y + patch[1][1].w + 1) >> 1;
const int t6 = (patch[0][1].z + patch[0][2].x + patch[2][1].z + patch[2][2].x + 2) >> 2;
const int t7 = (patch[0][1].w + patch[1][1].z + patch[1][2].x + patch[2][1].w + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
res2.x = t5;
res2.y = patch[1][1].z;
res2.z = t4;
res3.x = patch[1][1].w;
res3.y = t7;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
res2.x = t4;
res2.y = patch[1][1].z;
res2.z = t5;
res3.x = t6;
res3.y = t7;
res3.z = patch[1][1].w;
}
}
else
{
const int t0 = (patch[0][0].w + patch[0][1].y + patch[2][0].w + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].w + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][1].z + 1) >> 1;
const int t4 = (patch[0][1].y + patch[0][1].w + patch[2][1].y + patch[2][1].w + 2) >> 2;
const int t5 = (patch[0][1].z + patch[1][1].y + patch[1][1].w + patch[2][1].z + 2) >> 2;
const int t6 = (patch[0][1].w + patch[2][1].w + 1) >> 1;
const int t7 = (patch[1][1].z + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
res2.x = patch[1][1].z;
res2.y = t5;
res2.z = t4;
res3.x = t7;
res3.y = patch[1][1].w;
res3.z = t6;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
res2.x = t4;
res2.y = t5;
res2.z = patch[1][1].z;
res3.x = t6;
res3.y = patch[1][1].w;
res3.z = t7;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 2;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
if (d_x + 2 < dst.cols)
dst(d_y, d_x + 2) = res2;
if (d_x + 3 < dst.cols)
dst(d_y, d_x + 3) = res3;
}
template <typename D>
__global__ void Bayer2BGR_16u(const PtrStepb src, DevMem2D_<D> dst, const bool blue_last, const bool start_with_green)
{
const int s_x = blockIdx.x * blockDim.x + threadIdx.x;
int s_y = blockIdx.y * blockDim.y + threadIdx.y;
if (s_y >= dst.rows || (s_x << 1) >= dst.cols)
return;
s_y = ::min(::max(s_y, 1), dst.rows - 2);
ushort2 patch[3][3];
patch[0][1] = ((const ushort2*) src.ptr(s_y - 1))[s_x];
patch[0][0] = ((const ushort2*) src.ptr(s_y - 1))[::max(s_x - 1, 0)];
patch[0][2] = ((const ushort2*) src.ptr(s_y - 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[1][1] = ((const ushort2*) src.ptr(s_y))[s_x];
patch[1][0] = ((const ushort2*) src.ptr(s_y))[::max(s_x - 1, 0)];
patch[1][2] = ((const ushort2*) src.ptr(s_y))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
patch[2][1] = ((const ushort2*) src.ptr(s_y + 1))[s_x];
patch[2][0] = ((const ushort2*) src.ptr(s_y + 1))[::max(s_x - 1, 0)];
patch[2][2] = ((const ushort2*) src.ptr(s_y + 1))[::min(s_x + 1, ((dst.cols + 1) >> 1) - 1)];
D res0 = VecTraits<D>::all(numeric_limits<ushort>::max());
D res1 = VecTraits<D>::all(numeric_limits<ushort>::max());
if ((s_y & 1) ^ start_with_green)
{
const int t0 = (patch[0][1].x + patch[2][1].x + 1) >> 1;
const int t1 = (patch[1][0].y + patch[1][1].y + 1) >> 1;
const int t2 = (patch[0][1].x + patch[0][2].x + patch[2][1].x + patch[2][2].x + 2) >> 2;
const int t3 = (patch[0][1].y + patch[1][1].x + patch[1][2].x + patch[2][1].y + 2) >> 2;
if ((s_y & 1) ^ blue_last)
{
res0.x = t1;
res0.y = patch[1][1].x;
res0.z = t0;
res1.x = patch[1][1].y;
res1.y = t3;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = patch[1][1].x;
res0.z = t1;
res1.x = t2;
res1.y = t3;
res1.z = patch[1][1].y;
}
}
else
{
const int t0 = (patch[0][0].y + patch[0][1].y + patch[2][0].y + patch[2][1].y + 2) >> 2;
const int t1 = (patch[0][1].x + patch[1][0].y + patch[1][1].y + patch[2][1].x + 2) >> 2;
const int t2 = (patch[0][1].y + patch[2][1].y + 1) >> 1;
const int t3 = (patch[1][1].x + patch[1][2].x + 1) >> 1;
if ((s_y & 1) ^ blue_last)
{
res0.x = patch[1][1].x;
res0.y = t1;
res0.z = t0;
res1.x = t3;
res1.y = patch[1][1].y;
res1.z = t2;
}
else
{
res0.x = t0;
res0.y = t1;
res0.z = patch[1][1].x;
res1.x = t2;
res1.y = patch[1][1].y;
res1.z = t3;
}
}
const int d_x = (blockIdx.x * blockDim.x + threadIdx.x) << 1;
const int d_y = blockIdx.y * blockDim.y + threadIdx.y;
dst(d_y, d_x) = res0;
if (d_x + 1 < dst.cols)
dst(d_y, d_x + 1) = res1;
}
template <int cn>
void Bayer2BGR_8u_gpu(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream)
{
typedef typename TypeVec<uchar, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 4 * block.x), divUp(dst.rows, block.y));
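            // each thread writes 4 consecutive output pixels in x, hence cols is divided by 4*block.x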
cudaSafeCall( cudaFuncSetCacheConfig(Bayer2BGR_8u<dst_t>, cudaFuncCachePreferL1) );
Bayer2BGR_8u<dst_t><<<grid, block, 0, stream>>>(src, (DevMem2D_<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int cn>
void Bayer2BGR_16u_gpu(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream)
{
typedef typename TypeVec<ushort, cn>::vec_type dst_t;
const dim3 block(32, 8);
const dim3 grid(divUp(dst.cols, 2 * block.x), divUp(dst.rows, block.y));
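            // each thread writes 2 consecutive output pixels in x, hence cols is divided by 2*block.x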
cudaSafeCall( cudaFuncSetCacheConfig(Bayer2BGR_16u<dst_t>, cudaFuncCachePreferL1) );
Bayer2BGR_16u<dst_t><<<grid, block, 0, stream>>>(src, (DevMem2D_<dst_t>)dst, blue_last, start_with_green);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void Bayer2BGR_8u_gpu<3>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_8u_gpu<4>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_16u_gpu<3>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
template void Bayer2BGR_16u_gpu<4>(DevMem2Db src, DevMem2Db dst, bool blue_last, bool start_with_green, cudaStream_t stream);
}
}}
|
e9b6796422f820d510c22dc8b7b5a6cc3ad243cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
void query_device()
{
int deviceCount = 0; // how many cuda devices are installed.
hipGetDeviceCount(&deviceCount); // for mutiple cuda device
if (deviceCount == 0)
{
printf("No CUDA support device found");
}else{
printf("More than one CUDA support device found. Modify the code for others.\n");
}
int devNo = 0; // assuming only one cuda device.
hipDeviceProp_t iProp;
hipGetDeviceProperties(&iProp, devNo);
printf("Device %d: %s\n", devNo, iProp.name);
printf(" Number of multiprocessors: %d\n",
iProp.multiProcessorCount);
printf(" clock rate : %d\n",
iProp.clockRate);
printf(" Compute capability : %d.%d\n",
iProp.major, iProp.minor);
printf(" Total amount of global memory: %4.2f KB\n",
iProp.totalGlobalMem / 1024.0);
printf(" Total amount of constant memory: %4.2f KB\n",
iProp.totalConstMem / 1024.0);
printf(" Total amount of shared memory per block: %4.2f KB\n",
iProp.sharedMemPerBlock / 1024.0);
printf(" Total amount of shared memory per MP: %4.2f KB\n",
iProp.sharedMemPerMultiprocessor / 1024.0);
printf(" Total number of registers available per block: %d\n",
iProp.regsPerBlock);
printf(" Warp size: %d\n",
iProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
iProp.maxThreadsPerBlock);
printf(" Maximum number of threads per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of warps per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor / 32);
printf(" Maximum Grid size : (%d,%d,%d)\n",
iProp.maxGridSize[0], iProp.maxGridSize[1], iProp.maxGridSize[2]);
printf(" Maximum block dimension : (%d,%d,%d)\n",
iProp.maxThreadsDim[0], iProp.maxThreadsDim[1], iProp.maxThreadsDim[2]);
}
//int main()
//{
// query_device();
//}
| e9b6796422f820d510c22dc8b7b5a6cc3ad243cb.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
void query_device()
{
int deviceCount = 0; // how many cuda devices are installed.
cudaGetDeviceCount(&deviceCount); // for mutiple cuda device
if (deviceCount == 0)
{
printf("No CUDA support device found");
}else{
printf("More than one CUDA support device found. Modify the code for others.\n");
}
int devNo = 0; // assuming only one cuda device.
cudaDeviceProp iProp;
cudaGetDeviceProperties(&iProp, devNo);
printf("Device %d: %s\n", devNo, iProp.name);
printf(" Number of multiprocessors: %d\n",
iProp.multiProcessorCount);
printf(" clock rate : %d\n",
iProp.clockRate);
printf(" Compute capability : %d.%d\n",
iProp.major, iProp.minor);
printf(" Total amount of global memory: %4.2f KB\n",
iProp.totalGlobalMem / 1024.0);
printf(" Total amount of constant memory: %4.2f KB\n",
iProp.totalConstMem / 1024.0);
printf(" Total amount of shared memory per block: %4.2f KB\n",
iProp.sharedMemPerBlock / 1024.0);
printf(" Total amount of shared memory per MP: %4.2f KB\n",
iProp.sharedMemPerMultiprocessor / 1024.0);
printf(" Total number of registers available per block: %d\n",
iProp.regsPerBlock);
printf(" Warp size: %d\n",
iProp.warpSize);
printf(" Maximum number of threads per block: %d\n",
iProp.maxThreadsPerBlock);
printf(" Maximum number of threads per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of warps per multiprocessor: %d\n",
iProp.maxThreadsPerMultiProcessor / 32);
printf(" Maximum Grid size : (%d,%d,%d)\n",
iProp.maxGridSize[0], iProp.maxGridSize[1], iProp.maxGridSize[2]);
printf(" Maximum block dimension : (%d,%d,%d)\n",
iProp.maxThreadsDim[0], iProp.maxThreadsDim[1], iProp.maxThreadsDim[2]);
}
//int main()
//{
// query_device();
//}
|